@@ -337,17 +337,45 @@ def list_metrics():
     except Exception as e:
         return jsonify({"error": str(e)}), 500

+@app.route('/debug/metrics', methods=['GET'])
+def debug_metrics():
+    """
+    Debug endpoint to check what metrics are actually available in Prometheus.
+    """
+    try:
+        prom = get_prometheus_client()
+
+        # Get all available metrics
+        all_metrics = prom.all_metrics()
+
+        # Filter for pg_btree_bloat metrics
+        btree_metrics = [m for m in all_metrics if 'btree_bloat' in m]
+
+        # Get sample data for each btree metric
+        sample_data = {}
+        for metric in btree_metrics[:5]:  # Limit to the first 5 to avoid overwhelming the response
+            try:
+                result = prom.get_current_metric_value(metric_name=metric)
+                sample_data[metric] = {
+                    'count': len(result),
+                    'sample_labels': [entry.get('metric', {}) for entry in result[:2]]  # First 2 entries
+                }
+            except Exception as e:
+                sample_data[metric] = {'error': str(e)}
+
+        return jsonify({
+            'all_metrics_count': len(all_metrics),
+            'btree_metrics': btree_metrics,
+            'sample_data': sample_data
+        })
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+
 @app.route('/btree_bloat/csv', methods=['GET'])
 def get_btree_bloat_csv():
     """
-    Get current pg_btree_bloat metrics as a CSV table.
-
-    Query parameters:
-    - cluster_name: Cluster name filter (optional)
-    - node_name: Node name filter (optional)
-    - schemaname: Schema name filter (optional)
-    - tblname: Table name filter (optional)
-    - idxname: Index name filter (optional)
+    Get the most recent pg_btree_bloat metrics as a CSV table.
     """
     try:
         # Get query parameters
@@ -372,24 +400,28 @@ def get_btree_bloat_csv():
             filters.append(f'idxname="{idxname}"')
         if db_name:
             filters.append(f'datname="{db_name}"')
+
         filter_str = '{' + ','.join(filters) + '}' if filters else ''

-        # Metrics to fetch
-        metric_names = [
-            'pgwatch_pg_btree_bloat_real_size_mib',
-            'pgwatch_pg_btree_bloat_extra_size',
-            'pgwatch_pg_btree_bloat_extra_pct',
-            'pgwatch_pg_btree_bloat_fillfactor',
-            'pgwatch_pg_btree_bloat_bloat_size',
-            'pgwatch_pg_btree_bloat_bloat_pct',
-            'pgwatch_pg_btree_bloat_is_na',
+        # Metrics to fetch with last_over_time to get only the most recent value
+        metric_queries = [
+            f'last_over_time(pgwatch_pg_btree_bloat_real_size_mib{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_extra_size{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_extra_pct{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_fillfactor{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_bloat_size{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_bloat_pct{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_btree_bloat_is_na{filter_str}[1d])',
         ]
+
         prom = get_prometheus_client()
-        # Fetch all metrics
         metric_results = {}
-        for metric in metric_names:
+
+        for query in metric_queries:
             try:
-                result = prom.get_current_metric_value(metric_name=metric + filter_str)
+                # Use custom_query instead of get_current_metric_value
+                result = prom.custom_query(query=query)
+
                 for entry in result:
                     metric_labels = entry.get('metric', {})
                     key = (
@@ -398,30 +430,33 @@ def get_btree_bloat_csv():
                         metric_labels.get('tblname', ''),
                         metric_labels.get('idxname', '')
                     )
+
                     if key not in metric_results:
                         metric_results[key] = {
                             'database': metric_labels.get('datname', ''),
                             'schemaname': metric_labels.get('schemaname', ''),
                             'tblname': metric_labels.get('tblname', ''),
                             'idxname': metric_labels.get('idxname', ''),
                         }
-                    logger.warning(f"metric: {metric}")
-                    if metric.endswith('real_size_mib'):
-                        metric_results[key]['real_size_mib'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('extra_size'):
-                        metric_results[key]['extra_size'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('extra_pct'):
-                        metric_results[key]['extra_pct'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('fillfactor'):
-                        metric_results[key]['fillfactor'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('bloat_size'):
-                        metric_results[key]['bloat_size'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('bloat_pct'):
-                        metric_results[key]['bloat_pct'] = float(entry['value'][1]) if entry.get('value') else None
-                    elif metric.endswith('is_na'):
-                        metric_results[key]['is_na'] = int(float(entry['value'][1])) if entry.get('value') else None
+
+                    # Extract metric type from query and store value
+                    if 'real_size_mib' in query:
+                        metric_results[key]['real_size_mib'] = float(entry['value'][1])
+                    elif 'extra_size' in query and 'extra_pct' not in query:
+                        metric_results[key]['extra_size'] = float(entry['value'][1])
+                    elif 'extra_pct' in query:
+                        metric_results[key]['extra_pct'] = float(entry['value'][1])
+                    elif 'fillfactor' in query:
+                        metric_results[key]['fillfactor'] = float(entry['value'][1])
+                    elif 'bloat_size' in query:
+                        metric_results[key]['bloat_size'] = float(entry['value'][1])
+                    elif 'bloat_pct' in query:
+                        metric_results[key]['bloat_pct'] = float(entry['value'][1])
+                    elif 'is_na' in query:
+                        metric_results[key]['is_na'] = int(float(entry['value'][1]))
+
             except Exception as e:
-                logger.warning(f"Failed to query metric {metric}: {e}")
+                logger.warning(f"Failed to query: {query}, error: {e}")
                 continue

         # Prepare CSV output
@@ -435,14 +470,16 @@ def get_btree_bloat_csv():
         writer.writeheader()
         for row in metric_results.values():
             writer.writerow(row)
+
         csv_content = output.getvalue()
         output.close()

         # Create response
         response = make_response(csv_content)
         response.headers['Content-Type'] = 'text/csv'
-        response.headers['Content-Disposition'] = 'attachment; filename=btree_bloat_metrics.csv'
+        response.headers['Content-Disposition'] = 'attachment; filename=btree_bloat_latest.csv'
         return response
+
     except Exception as e:
         logger.error(f"Error processing btree bloat request: {e}")
         return jsonify({"error": str(e)}), 500
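
Note: get_prometheus_client() is defined elsewhere in this module and is not part of this diff. The methods called on it above (all_metrics(), custom_query(), get_current_metric_value()) match the prometheus_api_client package, so a minimal sketch of that helper, under that assumption, could look like the following; the PROMETHEUS_URL environment variable and the default URL are illustrative, not taken from the source.

import os
from prometheus_api_client import PrometheusConnect

def get_prometheus_client():
    # Hypothetical helper: connect to the Prometheus HTTP API.
    # PROMETHEUS_URL is an assumed configuration knob; adjust to the real deployment.
    return PrometheusConnect(
        url=os.environ.get("PROMETHEUS_URL", "http://localhost:9090"),
        disable_ssl=True,
    )

With the Flask app running (host and port depend on how it is started; the Flask dev server defaults to port 5000), the new endpoints can be exercised with plain HTTP requests, e.g. GET /debug/metrics to list the available pg_btree_bloat series, or GET /btree_bloat/csv?schemaname=public to download the latest bloat snapshot as CSV.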