🌐 AI Search & Agent Home
Skip to content

Commit 0c17f81

Browse files
Updated table stats backend, modified table_size_detailed query
1 parent f636efb commit 0c17f81

File tree

2 files changed: +80 additions, −59 deletions

config/pgwatch-prometheus/metrics.yml

Lines changed: 22 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,7 @@ metrics:
285285
from
286286
pg_locks
287287
where
288-
pid != pg_backend_pid()
288+
pid <> pg_backend_pid()
289289
and database = (select oid from pg_database where datname = current_database())
290290
limit 5000
291291
)
@@ -368,7 +368,7 @@ metrics:
368368
category as tag_category,
369369
vartype as tag_vartype,
370370
case when setting ~ '^-?[0-9]+$' then setting::bigint else null end as numeric_value,
371-
case when source != 'default' then 0 else 1 end as is_default,
371+
case when source <> 'default' then 0 else 1 end as is_default,
372372
1 as configured
373373
from pg_settings
374374
gauges:
@@ -390,7 +390,7 @@ metrics:
390390
from pg_class c
391391
join pg_namespace n on n.oid = c.relnamespace
392392
where relkind in ('p', 'r')
393-
and relpersistence != 't'
393+
and relpersistence <> 't'
394394
and not n.nspname like any (array[E'pg\\_%', 'information_schema', E'\\_timescaledb%'])
395395
and not exists(select * from pg_inherits where inhrelid = c.oid)
396396
and exists(select * from pg_inherits where inhparent = c.oid)
@@ -414,7 +414,7 @@ metrics:
414414
pg_table_size(relid) as table_size_b,
415415
abs(greatest(ceil(log((pg_table_size(relid) + 1) / 10 ^ 6)), 0))::text as tag_table_size_cardinality_mb, -- i.e. 0=<1MB, 1=<10MB, 2=<100MB,..
416416
pg_total_relation_size(relid) as total_relation_size_b,
417-
case when reltoastrelid != 0 then pg_total_relation_size(reltoastrelid) else 0::int8 end as toast_size_b,
417+
case when reltoastrelid <> 0 then pg_total_relation_size(reltoastrelid) else 0::int8 end as toast_size_b,
418418
(extract(epoch from now() - greatest(last_vacuum, last_autovacuum)))::int8 as seconds_since_last_vacuum,
419419
(extract(epoch from now() - greatest(last_analyze, last_autoanalyze)))::int8 as seconds_since_last_analyze,
420420
case when 'autovacuum_enabled=off' = ANY (c.reloptions) then 1 else 0 end as no_autovacuum,
@@ -432,14 +432,14 @@ metrics:
432432
autovacuum_count,
433433
analyze_count,
434434
autoanalyze_count,
435-
case when c.relkind != 'p' then age(c.relfrozenxid) else 0 end as tx_freeze_age
435+
case when c.relkind <> 'p' then age(c.relfrozenxid) else 0 end as tx_freeze_age
436436
from pg_stat_user_tables ut
437437
join
438438
pg_class c on c.oid = ut.relid
439439
where
440440
-- leaving out fully locked tables as pg_relation_size also wants a lock and would wait
441441
not exists(select 1 from pg_locks where relation = relid and mode = 'AccessExclusiveLock')
442-
and c.relpersistence != 't' -- and temp tables
442+
and c.relpersistence <> 't' -- and temp tables
443443
)
444444
445445
select /* pgwatch_generated */
@@ -526,7 +526,7 @@ metrics:
526526
from pg_class c
527527
join pg_namespace n on n.oid = c.relnamespace
528528
where relkind in ('p', 'r')
529-
and relpersistence != 't'
529+
and relpersistence <> 't'
530530
and not n.nspname like any (array[E'pg\\_%', 'information_schema', E'\\_timescaledb%'])
531531
and not exists(select * from pg_inherits where inhrelid = c.oid)
532532
and exists(select * from pg_inherits where inhparent = c.oid)
@@ -550,7 +550,7 @@ metrics:
550550
pg_table_size(relid) as table_size_b,
551551
abs(greatest(ceil(log((pg_table_size(relid) + 1) / 10 ^ 6)), 0))::text as tag_table_size_cardinality_mb, -- i.e. 0=<1MB, 1=<10MB, 2=<100MB,..
552552
pg_total_relation_size(relid) as total_relation_size_b,
553-
case when c.reltoastrelid != 0 then pg_total_relation_size(c.reltoastrelid) else 0::int8 end as toast_size_b,
553+
case when c.reltoastrelid <> 0 then pg_total_relation_size(c.reltoastrelid) else 0::int8 end as toast_size_b,
554554
(extract(epoch from now() - greatest(last_vacuum, last_autovacuum)))::int8 as seconds_since_last_vacuum,
555555
(extract(epoch from now() - greatest(last_analyze, last_autoanalyze)))::int8 as seconds_since_last_analyze,
556556
case when 'autovacuum_enabled=off' = ANY (c.reloptions) then 1 else 0 end as no_autovacuum,
@@ -568,7 +568,7 @@ metrics:
568568
autovacuum_count,
569569
analyze_count,
570570
autoanalyze_count,
571-
case when c.relkind != 'p' then age(c.relfrozenxid) else 0 end as tx_freeze_age,
571+
case when c.relkind <> 'p' then age(c.relfrozenxid) else 0 end as tx_freeze_age,
572572
extract(epoch from now() - last_seq_scan)::int8 as last_seq_scan_s
573573
from pg_stat_user_tables ut
574574
join pg_class c on c.oid = ut.relid
@@ -578,7 +578,7 @@ metrics:
578578
where
579579
-- leaving out fully locked tables as pg_relation_size also wants a lock and would wait
580580
not exists (select 1 from pg_locks where relation = relid and mode = 'AccessExclusiveLock')
581-
and c.relpersistence != 't' -- and temp tables
581+
and c.relpersistence <> 't' -- and temp tables
582582
order by case when c.relkind = 'p' then 1e9::int else coalesce(c.relpages, 0) + coalesce(t.relpages, 0) + coalesce(tir.relpages, 0) end desc
583583
limit 1500 /* NB! When changing the bottom final LIMIT also adjust this limit. Should be at least 5x bigger as approx sizes depend a lot on vacuum frequency.
584584
The general idea is to reduce filesystem "stat"-ing on tables that won't make it to final output anyways based on approximate size */
@@ -1328,40 +1328,24 @@ metrics:
13281328
current_database() as tag_datname,
13291329
n.nspname as tag_schema,
13301330
c.relname as tag_table_name,
1331-
n.nspname || '.' || c.relname as tag_table_full_name,
13321331
c.oid as table_oid,
13331332
c.reltoastrelid,
13341333
-- Main table size components (bytes)
13351334
pg_relation_size(c.oid, 'main') as table_main_size_b,
13361335
pg_relation_size(c.oid, 'fsm') as table_fsm_size_b,
13371336
pg_relation_size(c.oid, 'vm') as table_vm_size_b,
13381337
pg_indexes_size(c.oid) as table_indexes_size_b,
1339-
-- TOAST table components (if exists)
1340-
case
1341-
when c.reltoastrelid != 0 then pg_relation_size(c.reltoastrelid, 'main')
1342-
else 0::int8
1343-
end as toast_main_size_b,
1344-
case
1345-
when c.reltoastrelid != 0 then pg_relation_size(c.reltoastrelid, 'fsm')
1346-
else 0::int8
1347-
end as toast_fsm_size_b,
1348-
case
1349-
when c.reltoastrelid != 0 then pg_relation_size(c.reltoastrelid, 'vm')
1350-
else 0::int8
1351-
end as toast_vm_size_b,
1352-
case
1353-
when c.reltoastrelid != 0 then pg_indexes_size(c.reltoastrelid)
1354-
else 0::int8
1355-
end as toast_indexes_size_b,
1356-
-- Calculated totals for reference
1357-
pg_table_size(c.oid) as table_total_size_b,
1358-
pg_total_relation_size(c.oid) as relation_total_size_b,
1359-
-- Size cardinality for grouping (logarithmic scale)
1338+
pg_relation_size(c.reltoastrelid, 'main') as toast_main_size_b,
1339+
pg_relation_size(c.reltoastrelid, 'fsm') as toast_fsm_size_b,
1340+
pg_relation_size(c.reltoastrelid, 'vm') as toast_vm_size_b,
1341+
pg_indexes_size(c.reltoastrelid) as toast_indexes_size_b,
1342+
pg_total_relation_size(c.reltoastrelid) as toast_total_size_b,
1343+
pg_total_relation_size(c.oid) as total_relation_size_b,
13601344
abs(greatest(ceil(log((pg_total_relation_size(c.oid) + 1) / 10 ^ 6)), 0))::text as tag_size_cardinality_mb
13611345
from pg_class c
13621346
join pg_namespace n on n.oid = c.relnamespace
1363-
where c.relkind in ('r', 'p') -- regular tables and partitioned tables
1364-
and n.nspname not in ('information_schema', 'pg_catalog', 'pg_toast')
1347+
where c.relkind in ('r', 'p', 'm') -- regular tables, partitioned tables, materialized views
1348+
and n.nspname not in ('information_schema', 'pg_toast')
13651349
and not exists (
13661350
select 1 from pg_locks
13671351
where relation = c.oid and mode = 'AccessExclusiveLock'
@@ -1373,7 +1357,6 @@ metrics:
13731357
tag_datname,
13741358
tag_schema,
13751359
tag_table_name,
1376-
tag_table_full_name,
13771360
tag_size_cardinality_mb,
13781361
table_main_size_b,
13791362
table_fsm_size_b,
@@ -1383,16 +1366,10 @@ metrics:
13831366
toast_fsm_size_b,
13841367
toast_vm_size_b,
13851368
toast_indexes_size_b,
1386-
table_total_size_b,
1387-
relation_total_size_b,
1388-
-- Additional calculated metrics
1389-
(table_main_size_b + toast_main_size_b) as actual_data_size_b,
1390-
(table_fsm_size_b + table_vm_size_b + toast_fsm_size_b + toast_vm_size_b) as metadata_size_b,
1391-
(table_indexes_size_b + toast_indexes_size_b) as total_indexes_size_b,
1369+
total_relation_size_b,
13921370
(toast_main_size_b + toast_fsm_size_b + toast_vm_size_b + toast_indexes_size_b) as total_toast_size_b
13931371
from table_sizes
1394-
where table_main_size_b > 0 -- only include tables with actual data
1395-
order by relation_total_size_b desc
1372+
where total_relation_size_b > 0
13961373
gauges:
13971374
- table_main_size_b
13981375
- table_fsm_size_b
@@ -1402,13 +1379,12 @@ metrics:
14021379
- toast_fsm_size_b
14031380
- toast_vm_size_b
14041381
- toast_indexes_size_b
1405-
- table_total_size_b
1406-
- relation_total_size_b
1382+
- total_relation_size_b
14071383
- actual_data_size_b
14081384
- metadata_size_b
14091385
- total_indexes_size_b
14101386
- total_toast_size_b
1411-
statement_timeout_seconds: 300
1387+
statement_timeout_seconds: 15
14121388
pg_invalid_indexes:
14131389
description: >
14141390
This metric identifies invalid indexes in the database.

flask-backend/app.py

Lines changed: 58 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -484,10 +484,11 @@ def get_btree_bloat_csv():
484484
logger.error(f"Error processing btree bloat request: {e}")
485485
return jsonify({"error": str(e)}), 500
486486

487-
@app.route('/table_bloat/csv', methods=['GET'])
488-
def get_table_bloat_csv():
487+
@app.route('/table_info/csv', methods=['GET'])
488+
def get_table_info_csv():
489489
"""
490-
Get the most recent pg_table_bloat metrics as a CSV table.
490+
Get comprehensive table information including bloat metrics and detailed size information as a CSV table.
491+
Combines pg_table_bloat and table_size_detailed metrics for complete table analysis.
491492
"""
492493
try:
493494
# Get query parameters
@@ -513,14 +514,27 @@ def get_table_bloat_csv():
513514
filter_str = '{' + ','.join(filters) + '}' if filters else ''
514515

515516
# Metrics to fetch with last_over_time to get only the most recent value
517+
# Include both bloat metrics and detailed size metrics
516518
metric_queries = [
519+
# Bloat metrics
517520
f'last_over_time(pgwatch_pg_table_bloat_real_size_mib{filter_str}[1d])',
518521
f'last_over_time(pgwatch_pg_table_bloat_extra_size{filter_str}[1d])',
519522
f'last_over_time(pgwatch_pg_table_bloat_extra_pct{filter_str}[1d])',
520523
f'last_over_time(pgwatch_pg_table_bloat_fillfactor{filter_str}[1d])',
521524
f'last_over_time(pgwatch_pg_table_bloat_bloat_size{filter_str}[1d])',
522525
f'last_over_time(pgwatch_pg_table_bloat_bloat_pct{filter_str}[1d])',
523526
f'last_over_time(pgwatch_pg_table_bloat_is_na{filter_str}[1d])',
527+
# Detailed size metrics
528+
f'last_over_time(pgwatch_table_size_detailed_table_main_size_b{filter_str}[1d])',
529+
f'last_over_time(pgwatch_table_size_detailed_table_fsm_size_b{filter_str}[1d])',
530+
f'last_over_time(pgwatch_table_size_detailed_table_vm_size_b{filter_str}[1d])',
531+
f'last_over_time(pgwatch_table_size_detailed_table_indexes_size_b{filter_str}[1d])',
532+
f'last_over_time(pgwatch_table_size_detailed_toast_main_size_b{filter_str}[1d])',
533+
f'last_over_time(pgwatch_table_size_detailed_toast_fsm_size_b{filter_str}[1d])',
534+
f'last_over_time(pgwatch_table_size_detailed_toast_vm_size_b{filter_str}[1d])',
535+
f'last_over_time(pgwatch_table_size_detailed_toast_indexes_size_b{filter_str}[1d])',
536+
f'last_over_time(pgwatch_table_size_detailed_total_relation_size_b{filter_str}[1d])',
537+
f'last_over_time(pgwatch_table_size_detailed_total_toast_size_b{filter_str}[1d])',
524538
]
525539

526540
prom = get_prometheus_client()
@@ -542,20 +556,45 @@ def get_table_bloat_csv():
542556
'schemaname': metric_labels.get('schemaname', ''),
543557
'tblname': metric_labels.get('tblname', ''),
544558
}
559+
value = float(entry['value'][1])
560+
561+
# Bloat metrics
545562
if 'real_size_mib' in query:
546-
metric_results[key]['real_size_mib'] = float(entry['value'][1])
563+
metric_results[key]['real_size_mib'] = value
547564
elif 'extra_size' in query and 'extra_pct' not in query:
548-
metric_results[key]['extra_size'] = float(entry['value'][1])
565+
metric_results[key]['extra_size'] = value
549566
elif 'extra_pct' in query:
550-
metric_results[key]['extra_pct'] = float(entry['value'][1])
567+
metric_results[key]['extra_pct'] = value
551568
elif 'fillfactor' in query:
552-
metric_results[key]['fillfactor'] = float(entry['value'][1])
569+
metric_results[key]['fillfactor'] = value
553570
elif 'bloat_size' in query:
554-
metric_results[key]['bloat_size'] = float(entry['value'][1])
571+
metric_results[key]['bloat_size'] = value
555572
elif 'bloat_pct' in query:
556-
metric_results[key]['bloat_pct'] = float(entry['value'][1])
573+
metric_results[key]['bloat_pct'] = value
557574
elif 'is_na' in query:
558-
metric_results[key]['is_na'] = int(float(entry['value'][1]))
575+
metric_results[key]['is_na'] = int(value)
576+
577+
# Size metrics (convert bytes to MiB for consistency)
578+
elif 'table_main_size_b' in query:
579+
metric_results[key]['table_main_size_mib'] = value / (1024 * 1024)
580+
elif 'table_fsm_size_b' in query:
581+
metric_results[key]['table_fsm_size_mib'] = value / (1024 * 1024)
582+
elif 'table_vm_size_b' in query:
583+
metric_results[key]['table_vm_size_mib'] = value / (1024 * 1024)
584+
elif 'table_indexes_size_b' in query:
585+
metric_results[key]['table_indexes_size_mib'] = value / (1024 * 1024)
586+
elif 'toast_main_size_b' in query:
587+
metric_results[key]['toast_main_size_mib'] = value / (1024 * 1024)
588+
elif 'toast_fsm_size_b' in query:
589+
metric_results[key]['toast_fsm_size_mib'] = value / (1024 * 1024)
590+
elif 'toast_vm_size_b' in query:
591+
metric_results[key]['toast_vm_size_mib'] = value / (1024 * 1024)
592+
elif 'toast_indexes_size_b' in query:
593+
metric_results[key]['toast_indexes_size_mib'] = value / (1024 * 1024)
594+
elif 'total_relation_size_b' in query:
595+
metric_results[key]['total_relation_size_mib'] = value / (1024 * 1024)
596+
elif 'total_toast_size_b' in query:
597+
metric_results[key]['total_toast_size_mib'] = value / (1024 * 1024)
559598
except Exception as e:
560599
logger.warning(f"Failed to query: {query}, error: {e}")
561600
continue
@@ -564,8 +603,14 @@ def get_table_bloat_csv():
564603
output = io.StringIO()
565604
fieldnames = [
566605
'database', 'schemaname', 'tblname',
606+
# Bloat metrics
567607
'real_size_mib', 'extra_size', 'extra_pct', 'fillfactor',
568-
'bloat_size', 'bloat_pct', 'is_na'
608+
'bloat_size', 'bloat_pct', 'is_na',
609+
# Size metrics (all in MiB)
610+
'table_main_size_mib', 'table_fsm_size_mib', 'table_vm_size_mib',
611+
'table_indexes_size_mib', 'toast_main_size_mib', 'toast_fsm_size_mib',
612+
'toast_vm_size_mib', 'toast_indexes_size_mib', 'total_relation_size_mib',
613+
'total_toast_size_mib'
569614
]
570615
writer = csv.DictWriter(output, fieldnames=fieldnames)
571616
writer.writeheader()
@@ -578,11 +623,11 @@ def get_table_bloat_csv():
578623
# Create response
579624
response = make_response(csv_content)
580625
response.headers['Content-Type'] = 'text/csv'
581-
response.headers['Content-Disposition'] = 'attachment; filename=table_bloat_latest.csv'
626+
response.headers['Content-Disposition'] = 'attachment; filename=table_info_latest.csv'
582627
return response
583628

584629
except Exception as e:
585-
logger.error(f"Error processing table bloat request: {e}")
630+
logger.error(f"Error processing table info request: {e}")
586631
return jsonify({"error": str(e)}), 500
587632

588633
if __name__ == '__main__':

0 commit comments

Comments (0)