diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index de4a56a..64055bc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -46,6 +46,7 @@ cli:e2e:dind:
   variables:
     DOCKER_HOST: tcp://docker:2375
     DOCKER_TLS_CERTDIR: ""
+    DOCKER_API_VERSION: "1.43"
     GIT_STRATEGY: fetch
   before_script:
     - apk add --no-cache bash curl git coreutils docker-cli docker-compose openssl
@@ -117,6 +118,7 @@ cli:node:e2e:dind:
   variables:
     DOCKER_HOST: tcp://docker:2375
     DOCKER_TLS_CERTDIR: ""
+    DOCKER_API_VERSION: "1.43"
     GIT_STRATEGY: fetch
   before_script:
     - corepack enable || true
@@ -139,6 +141,7 @@ cli:node:full:dind:
   variables:
     DOCKER_HOST: tcp://docker:2375
     DOCKER_TLS_CERTDIR: ""
+    DOCKER_API_VERSION: "1.43"
     GIT_STRATEGY: fetch
   before_script:
     - corepack enable || true
diff --git a/cli/lib/issues.ts b/cli/lib/issues.ts
index 4231778..356885c 100644
--- a/cli/lib/issues.ts
+++ b/cli/lib/issues.ts
@@ -2,13 +2,62 @@ import * as https from "https";
 import { URL } from "url";
 import { maskSecret, normalizeBaseUrl } from "./util";
 
+export interface IssueActionItem {
+  id: string;
+  issue_id: string;
+  title: string;
+  description: string | null;
+  severity: number;
+  is_done: boolean;
+  done_by: number | null;
+  done_at: string | null;
+  created_at: string;
+  updated_at: string;
+}
+
+export interface Issue {
+  id: string;
+  title: string;
+  description: string | null;
+  created_at: string;
+  updated_at: string;
+  status: number;
+  url_main: string | null;
+  urls_extra: string[] | null;
+  data: unknown | null;
+  author_id: number;
+  org_id: number;
+  project_id: number | null;
+  is_ai_generated: boolean;
+  assigned_to: number[] | null;
+  labels: string[] | null;
+  is_edited: boolean;
+  author_display_name: string;
+  comment_count: number;
+  action_items: IssueActionItem[];
+}
+
+export interface IssueComment {
+  id: string;
+  issue_id: string;
+  author_id: number;
+  parent_comment_id: string | null;
+  content: string;
+  created_at: string;
+  updated_at: string;
+  data: unknown | null;
+}
+
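+// Narrowed payload shapes for the list and detail endpoints; keep these in
+// sync with the "select" query parameters set in fetchIssues()/fetchIssue().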
+export type IssueListItem = Pick<Issue, "id" | "title" | "status" | "created_at">;
+
+export type IssueDetail = Pick<Issue, "id" | "title" | "description" | "status" | "created_at" | "author_display_name">;
+
 export interface FetchIssuesParams {
   apiKey: string;
   apiBaseUrl: string;
   debug?: boolean;
 }
 
-export async function fetchIssues(params: FetchIssuesParams): Promise<any> {
+export async function fetchIssues(params: FetchIssuesParams): Promise<IssueListItem[]> {
   const { apiKey, apiBaseUrl, debug } = params;
   if (!apiKey) {
     throw new Error("API key is required");
@@ -16,6 +65,7 @@ export async function fetchIssues(params: FetchIssuesParams): Promise<any> {
 
   const base = normalizeBaseUrl(apiBaseUrl);
   const url = new URL(`${base}/issues`);
+  url.searchParams.set("select", "id,title,status,created_at");
 
   const headers: Record<string, string> = {
     "access-token": apiKey,
@@ -54,10 +104,10 @@ export async function fetchIssues(params: FetchIssuesParams): Promise<any> {
       }
       if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
         try {
-          const parsed = JSON.parse(data);
+          const parsed = JSON.parse(data) as IssueListItem[];
           resolve(parsed);
         } catch {
-          resolve(data);
+          reject(new Error(`Failed to parse issues response: ${data}`));
         }
       } else {
         let errMsg = `Failed to fetch issues: HTTP ${res.statusCode}`;
@@ -88,7 +138,7 @@ export interface FetchIssueCommentsParams {
   debug?: boolean;
 }
 
-export async function fetchIssueComments(params: FetchIssueCommentsParams): Promise<any> {
+export async function fetchIssueComments(params: FetchIssueCommentsParams): Promise<IssueComment[]> {
   const { apiKey, apiBaseUrl, issueId, debug } = params;
   if (!apiKey) {
     throw new Error("API key is required");
@@ -137,10 +187,10 @@ export async function fetchIssueComments(params: FetchIssueCommentsParams): Promise<any> {
       }
       if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
         try {
-          const parsed = JSON.parse(data);
+          const parsed = JSON.parse(data) as IssueComment[];
           resolve(parsed);
         } catch {
-          resolve(data);
+          reject(new Error(`Failed to parse issue comments response: ${data}`));
         }
       } else {
         let errMsg = `Failed to fetch issue comments: HTTP ${res.statusCode}`;
@@ -170,7 +220,7 @@ export interface FetchIssueParams {
   debug?: boolean;
 }
 
-export async function fetchIssue(params: FetchIssueParams): Promise<any> {
+export async function fetchIssue(params: FetchIssueParams): Promise<IssueDetail | null> {
   const { apiKey, apiBaseUrl, issueId, debug } = params;
   if (!apiKey) {
     throw new Error("API key is required");
@@ -181,6 +231,7 @@ export async function fetchIssue(params: FetchIssueParams): Promise<any> {
 
   const base = normalizeBaseUrl(apiBaseUrl);
   const url = new URL(`${base}/issues`);
+  url.searchParams.set("select", "id,title,description,status,created_at,author_display_name");
   url.searchParams.set("id", `eq.${issueId}`);
   url.searchParams.set("limit", "1");
 
@@ -223,12 +274,12 @@ export async function fetchIssue(params: FetchIssueParams): Promise<any> {
         try {
           const parsed = JSON.parse(data);
           if (Array.isArray(parsed)) {
-            resolve(parsed[0] ?? null);
+            resolve((parsed[0] as IssueDetail) ?? null);
           } else {
-            resolve(parsed);
+            resolve(parsed as IssueDetail);
           }
         } catch {
-          resolve(data);
+          reject(new Error(`Failed to parse issue response: ${data}`));
         }
       } else {
         let errMsg = `Failed to fetch issue: HTTP ${res.statusCode}`;
@@ -260,7 +311,7 @@ export interface CreateIssueCommentParams {
   debug?: boolean;
 }
 
-export async function createIssueComment(params: CreateIssueCommentParams): Promise<any> {
+export async function createIssueComment(params: CreateIssueCommentParams): Promise<IssueComment> {
   const { apiKey, apiBaseUrl, issueId, content, parentCommentId, debug } = params;
   if (!apiKey) {
     throw new Error("API key is required");
@@ -324,10 +375,10 @@ export async function createIssueComment(params: CreateIssueCommentParams): Promise<any> {
       }
       if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
         try {
-          const parsed = JSON.parse(data);
+          const parsed = JSON.parse(data) as IssueComment;
           resolve(parsed);
         } catch {
-          resolve(data);
+          reject(new Error(`Failed to parse create comment response: ${data}`));
         }
       } else {
         let errMsg = `Failed to create issue comment: HTTP ${res.statusCode}`;
diff --git a/cli/lib/mcp-server.ts b/cli/lib/mcp-server.ts
index ede1f17..5532532 100644
--- a/cli/lib/mcp-server.ts
+++ b/cli/lib/mcp-server.ts
@@ -109,16 +109,8 @@ export async function startMcpServer(rootOpts?: RootOptsLike, extra?: { debug?:
 
     try {
       if (toolName === "list_issues") {
-        const result = await fetchIssues({ apiKey, apiBaseUrl, debug });
-        const trimmed = Array.isArray(result)
-          ? (result as any[]).map((r) => ({
-              id: (r as any).id,
-              title: (r as any).title,
-              status: (r as any).status,
-              created_at: (r as any).created_at,
-            }))
-          : result;
-        return { content: [{ type: "text", text: JSON.stringify(trimmed, null, 2) }] };
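+        // fetchIssues() now narrows columns server-side via its "select"
+        // parameter, so the client-side trimming above is no longer needed.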
+        const issues = await fetchIssues({ apiKey, apiBaseUrl, debug });
+        return { content: [{ type: "text", text: JSON.stringify(issues, null, 2) }] };
       }
 
       if (toolName === "view_issue") {
diff --git a/cli/package-lock.json b/cli/package-lock.json
index 7466c86..3c9d4cf 100644
--- a/cli/package-lock.json
+++ b/cli/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "postgresai",
-  "version": "0.12.0-beta.6",
+  "version": "0.12.0-beta.7",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "postgresai",
-      "version": "0.12.0-beta.6",
+      "version": "0.12.0-beta.7",
       "license": "Apache-2.0",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.20.2",
diff --git a/cli/package.json b/cli/package.json
index 261555a..6d5afb0 100644
--- a/cli/package.json
+++ b/cli/package.json
@@ -1,6 +1,6 @@
 {
   "name": "postgresai",
-  "version": "0.12.0-beta.6",
+  "version": "0.12.0-beta.7",
   "description": "postgres_ai CLI (Node.js)",
   "license": "Apache-2.0",
   "private": false,
diff --git a/reporter/postgres_reports.py b/reporter/postgres_reports.py
index 1a989bc..c003146 100644
--- a/reporter/postgres_reports.py
+++ b/reporter/postgres_reports.py
@@ -21,8 +21,12 @@ class PostgresReportGenerator:
+    # Default databases to always exclude
+    DEFAULT_EXCLUDED_DATABASES = {'template0', 'template1', 'rdsadmin', 'azure_maintenance', 'cloudsqladmin'}
+
     def __init__(self, prometheus_url: str = "http://sink-prometheus:9090",
-                 postgres_sink_url: str = "postgresql://pgwatch@sink-postgres:5432/measurements"):
+                 postgres_sink_url: str = "postgresql://pgwatch@sink-postgres:5432/measurements",
+                 excluded_databases: Optional[List[str]] = None):
         """
         Initialize the PostgreSQL report generator.
 
@@ -30,11 +34,16 @@ def __init__(self, prometheus_url: str = "http://sink-prometheus:9090",
             prometheus_url: URL of the Prometheus instance (default: http://sink-prometheus:9090)
             postgres_sink_url: Connection string for the Postgres sink database
                 (default: postgresql://pgwatch@sink-postgres:5432/measurements)
+            excluded_databases: Additional databases to exclude from reports
         """
         self.prometheus_url = prometheus_url
         self.base_url = f"{prometheus_url}/api/v1"
         self.postgres_sink_url = postgres_sink_url
         self.pg_conn = None
+        # Combine default exclusions with user-provided exclusions
+        self.excluded_databases = self.DEFAULT_EXCLUDED_DATABASES.copy()
+        if excluded_databases:
+            self.excluded_databases.update(excluded_databases)
 
     def test_connection(self) -> bool:
         """Test connection to Prometheus."""
@@ -146,6 +155,62 @@ def query_instant(self, query: str) -> Dict[str, Any]:
             print(f"Query error: {e}")
             return {}
 
+    def _get_postgres_version_info(self, cluster: str, node_name: str) -> Dict[str, str]:
+        """
+        Fetch and parse Postgres version information from pgwatch settings metrics.
+
+        Notes:
+        - This helper is intentionally defensive: it validates the returned setting_name label
+          (tests may stub query responses broadly by metric name substring).
+        - Uses a single query with a regex on setting_name to reduce roundtrips.
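+        - Example return value (illustrative):
+          {"version": "16.2", "server_version_num": "160002",
+           "server_major_ver": "16", "server_minor_ver": "2"}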
+        """
+        query = (
+            f'last_over_time(pgwatch_settings_configured{{'
+            f'cluster="{cluster}", node_name="{node_name}", '
+            f'setting_name=~"server_version|server_version_num"}}[3h])'
+        )
+
+        result = self.query_instant(query)
+        version_str = None
+        version_num = None
+
+        if result.get("status") == "success":
+            if result.get("data", {}).get("result"):
+                for item in result["data"]["result"]:
+                    metric = item.get("metric", {}) or {}
+                    setting_name = metric.get("setting_name", "")
+                    setting_value = metric.get("setting_value", "")
+                    if setting_name == "server_version" and setting_value:
+                        version_str = setting_value
+                    elif setting_name == "server_version_num" and setting_value:
+                        version_num = setting_value
+            else:
+                print(f"Warning: No version data found (cluster={cluster}, node_name={node_name})")
+        else:
+            print(f"Warning: Version query failed (cluster={cluster}, node_name={node_name}): status={result.get('status')}")
+
+        server_version = version_str or "Unknown"
+        version_info: Dict[str, str] = {
+            "version": server_version,
+            "server_version_num": version_num or "Unknown",
+            "server_major_ver": "Unknown",
+            "server_minor_ver": "Unknown",
+        }
+
+        if server_version != "Unknown":
+            # Handle both formats:
+            # - "15.3"
+            # - "15.3 (Ubuntu 15.3-1.pgdg20.04+1)"
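+            # e.g. "16.2 (Debian 16.2-1.pgdg120+2)" yields major "16", minor "2"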
+            version_parts = server_version.split()[0].split(".")
+            if len(version_parts) >= 1 and version_parts[0]:
+                version_info["server_major_ver"] = version_parts[0]
+            if len(version_parts) >= 2:
+                version_info["server_minor_ver"] = ".".join(version_parts[1:])
+            else:
+                version_info["server_minor_ver"] = "0"
+
+        return version_info
+
     def generate_a002_version_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[str, Any]:
         """
         Generate A002 Version Information report.
@@ -158,52 +223,7 @@ def generate_a002_version_report(self, cluster: str = "local", node_name: str =
             Dictionary containing version information
         """
         print(f"Generating A002 Version Information report for cluster='{cluster}', node_name='{node_name}'...")
-
-        # Query PostgreSQL version information using last_over_time to get most recent values
-        # Use 3h lookback to handle cases where metrics collection might be intermittent
-        version_queries = {
-            'server_version': f'last_over_time(pgwatch_settings_configured{{cluster="{cluster}", node_name="{node_name}", setting_name="server_version"}}[3h])',
-            'server_version_num': f'last_over_time(pgwatch_settings_configured{{cluster="{cluster}", node_name="{node_name}", setting_name="server_version_num"}}[3h])',
-        }
-
-        version_data = {}
-        for metric_name, query in version_queries.items():
-            result = self.query_instant(query)
-            if result.get('status') == 'success' and result.get('data', {}).get('result'):
-                if len(result['data']['result']) > 0:
-                    # Extract setting_value from the metric labels
-                    latest_value = result['data']['result'][0]['metric'].get('setting_value', '')
-                    if latest_value:
-                        version_data[metric_name] = latest_value
-                else:
-                    print(f"Warning: A002 - No data for {metric_name} (cluster={cluster}, node_name={node_name})")
-            else:
-                print(f"Warning: A002 - Query failed for {metric_name}: status={result.get('status')}")
-
-        # Format the version data
-        server_version = version_data.get('server_version', 'Unknown')
-        version_info = {
-            "version": server_version,
-            "server_version_num": version_data.get('server_version_num', 'Unknown'),
-        }
-
-        # Parse major and minor version if we have a valid version string
-        if server_version and server_version != 'Unknown':
-            # Handle both formats: "14.5" and "14.5 (Ubuntu 14.5-1.pgdg20.04+1)"
-            version_parts = server_version.split()[0].split('.')
-            if len(version_parts) >= 1:
-                version_info["server_major_ver"] = version_parts[0]
-                if len(version_parts) >= 2:
-                    version_info["server_minor_ver"] = '.'.join(version_parts[1:])
-                else:
-                    version_info["server_minor_ver"] = '0'
-            else:
-                version_info["server_major_ver"] = 'Unknown'
-                version_info["server_minor_ver"] = 'Unknown'
-        else:
-            version_info["server_major_ver"] = 'Unknown'
-            version_info["server_minor_ver"] = 'Unknown'
-
+        version_info = self._get_postgres_version_info(cluster, node_name)
         return self.format_report_data("A002", {"version": version_info}, node_name)
 
     def generate_a003_settings_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[str, Any]:
@@ -254,7 +274,7 @@ def generate_a003_settings_report(self, cluster: str = "local", node_name: str =
             print(f"Query result status: {result.get('status')}")
             print(f"Query result data: {result.get('data', {})}")
 
-        return self.format_report_data("A003", settings_data, node_name)
+        return self.format_report_data("A003", settings_data, node_name, postgres_version=self._get_postgres_version_info(cluster, node_name))
 
     def generate_a004_cluster_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[str, Any]:
         """
@@ -307,10 +327,15 @@ def generate_a004_cluster_report(self, cluster: str = "local", node_name: str =
                     size_bytes = float(result['value'][1])
                     database_sizes[db_name] = size_bytes
 
-        return self.format_report_data("A004", {
-            "general_info": cluster_data,
-            "database_sizes": database_sizes
-        }, node_name)
+        return self.format_report_data(
+            "A004",
+            {
+                "general_info": cluster_data,
+                "database_sizes": database_sizes,
+            },
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_a007_altered_settings_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[
         str, Any]:
@@ -355,7 +380,7 @@ def generate_a007_altered_settings_report(self, cluster: str = "local", node_nam
             print(f"Warning: A007 - No altered settings data returned for cluster={cluster}, node_name={node_name}")
             print(f"Query result status: {result.get('status')}")
 
-        return self.format_report_data("A007", altered_settings, node_name)
+        return self.format_report_data("A007", altered_settings, node_name, postgres_version=self._get_postgres_version_info(cluster, node_name))
 
     def generate_h001_invalid_indexes_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[
         str, Any]:
@@ -429,7 +454,12 @@ def generate_h001_invalid_indexes_report(self, cluster: str = "local", node_name
                 "database_size_pretty": self.format_bytes(db_size_bytes)
             }
 
-        return self.format_report_data("H001", invalid_indexes_by_db, node_name)
+        return self.format_report_data(
+            "H001",
+            invalid_indexes_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_h002_unused_indexes_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[str, Any]:
         """
@@ -551,7 +581,12 @@ def generate_h002_unused_indexes_report(self, cluster: str = "local", node_name:
             }
         }
 
-        return self.format_report_data("H002", unused_indexes_by_db, node_name)
+        return self.format_report_data(
+            "H002",
+            unused_indexes_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_h004_redundant_indexes_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[
         str, Any]:
@@ -655,7 +690,12 @@ def generate_h004_redundant_indexes_report(self, cluster: str = "local", node_na
                 "database_size_pretty": self.format_bytes(db_size_bytes)
             }
 
-        return self.format_report_data("H004", redundant_indexes_by_db, node_name)
+        return self.format_report_data(
+            "H004",
+            redundant_indexes_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_d004_pgstat_settings_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[
         str, Any]:
@@ -724,11 +764,16 @@ def generate_d004_pgstat_settings_report(self, cluster: str = "local", node_name
         # Check if pg_stat_statements is available and working by querying its metrics
         pgss_status = self._check_pg_stat_statements_status(cluster, node_name)
 
-        return self.format_report_data("D004", {
-            "settings": pgstat_data,
-            "pg_stat_statements_status": pgss_status,
-            "pg_stat_kcache_status": kcache_status
-        }, node_name)
+        return self.format_report_data(
+            "D004",
+            {
+                "settings": pgstat_data,
+                "pg_stat_statements_status": pgss_status,
+                "pg_stat_kcache_status": kcache_status,
+            },
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def _check_pg_stat_kcache_status(self, cluster: str, node_name: str) -> Dict[str, Any]:
         """
@@ -897,7 +942,7 @@ def generate_f001_autovacuum_settings_report(self, cluster: str = "local", node_
                 "pretty_value": self.format_setting_value(setting_name, setting_value, unit)
             }
 
-        return self.format_report_data("F001", autovacuum_data, node_name)
+        return self.format_report_data("F001", autovacuum_data, node_name, postgres_version=self._get_postgres_version_info(cluster, node_name))
 
     def generate_f005_btree_bloat_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[str, Any]:
         """
@@ -986,7 +1031,12 @@ def generate_f005_btree_bloat_report(self, cluster: str = "local", node_name: st
                 "database_size_pretty": self.format_bytes(db_size_bytes)
             }
 
-        return self.format_report_data("F005", bloated_indexes_by_db, node_name)
+        return self.format_report_data(
+            "F005",
+            bloated_indexes_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_g001_memory_settings_report(self, cluster: str = "local", node_name: str = "node-01") -> Dict[
         str, Any]:
@@ -1064,10 +1114,15 @@ def generate_g001_memory_settings_report(self, cluster: str = "local", node_name
         # Calculate some memory usage estimates and recommendations
        memory_analysis = self._analyze_memory_settings(memory_data)
 
-        return self.format_report_data("G001", {
-            "settings": memory_data,
-            "analysis": memory_analysis
-        }, node_name)
+        return self.format_report_data(
+            "G001",
+            {
+                "settings": memory_data,
+                "analysis": memory_analysis,
+            },
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def _analyze_memory_settings(self, memory_data: Dict[str, Any]) -> Dict[str, Any]:
         """
@@ -1252,7 +1307,12 @@ def generate_f004_heap_bloat_report(self, cluster: str = "local", node_name: str
                 "database_size_pretty": self.format_bytes(db_size_bytes)
             }
 
-        return self.format_report_data("F004", bloated_tables_by_db, node_name)
+        return self.format_report_data(
+            "F004",
+            bloated_tables_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_k001_query_calls_report(self, cluster: str = "local", node_name: str = "node-01",
                                          time_range_minutes: int = 60) -> Dict[str, Any]:
@@ -1309,7 +1369,12 @@ def generate_k001_query_calls_report(self, cluster: str = "local", node_name: st
             }
         }
 
-        return self.format_report_data("K001", queries_by_db, node_name)
+        return self.format_report_data(
+            "K001",
+            queries_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def generate_k003_top_queries_report(self, cluster: str = "local", node_name: str = "node-01",
                                          time_range_minutes: int = 60, limit: int = 50) -> Dict[str, Any]:
@@ -1368,7 +1433,12 @@ def generate_k003_top_queries_report(self, cluster: str = "local", node_name: st
             }
         }
 
-        return self.format_report_data("K003", queries_by_db, node_name)
+        return self.format_report_data(
+            "K003",
+            queries_by_db,
+            node_name,
+            postgres_version=self._get_postgres_version_info(cluster, node_name),
+        )
 
     def _get_pgss_metrics_data(self, cluster: str, node_name: str, start_time: datetime, end_time: datetime) -> List[
         Dict[str, Any]]:
@@ -1627,7 +1697,8 @@ def format_bytes(self, bytes_value: float) -> str:
         return f"{value:.2f} {units[unit_index]}"
 
     def format_report_data(self, check_id: str, data: Dict[str, Any], host: str = "target-database",
-                           all_hosts: Dict[str, List[str]] = None) -> Dict[str, Any]:
+                           all_hosts: Dict[str, List[str]] = None,
+                           postgres_version: Dict[str, str] = None) -> Dict[str, Any]:
         """
         Format data to match template structure.
 
@@ -1636,6 +1707,7 @@ def format_report_data(self, check_id: str, data: Dict[str, Any], host: str = "t
            data: The data to format (can be a dict with node keys if combining multiple nodes)
            host: Primary host identifier (used if all_hosts not provided)
            all_hosts: Optional dict with 'primary' and 'standbys' keys for multi-node reports
+           postgres_version: Optional Postgres version info to include at report level
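+               (for single-node data this is stored at results[<host>]["postgres_version"])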
 
         Returns:
             Dictionary formatted for templates
@@ -1654,14 +1726,16 @@ def format_report_data(self, check_id: str, data: Dict[str, Any], host: str = "t
         # Handle both single-node and multi-node data structures
         if isinstance(data, dict) and any(isinstance(v, dict) and 'data' in v for v in data.values()):
             # Multi-node structure: data is already in {node_name: {"data": ...}} format
+            # postgres_version should already be embedded per-node; warn if passed here
+            if postgres_version:
+                print(f"Warning: postgres_version parameter ignored for multi-node data in {check_id}")
             results = data
         else:
             # Single-node structure: wrap data in host key
-            results = {
-                host: {
-                    "data": data
-                }
-            }
+            node_result = {"data": data}
+            if postgres_version:
+                node_result["postgres_version"] = postgres_version
+            results = {host: node_result}
 
         template_data = {
             "checkId": check_id,
@@ -2097,7 +2171,7 @@ def get_all_databases(self, cluster: str = "local", node_name: str = "node-01")
 
         # Helper to add a name safely
         def add_db(name: str) -> None:
-            if name and name not in ('template0', 'template1') and name not in database_set:
+            if name and name not in self.excluded_databases and name not in database_set:
                 database_set.add(name)
                 databases.append(name)
 
@@ -2292,10 +2366,18 @@ def main():
     parser.add_argument('--epoch', default='1')
     parser.add_argument('--no-upload', action='store_true', default=False,
                         help='Do not upload reports to the API')
+    parser.add_argument('--exclude-databases', type=str, default=None,
+                        help='Comma-separated list of additional databases to exclude from reports '
+                             f'(default exclusions: {", ".join(sorted(PostgresReportGenerator.DEFAULT_EXCLUDED_DATABASES))})')
 
     args = parser.parse_args()
+
+    # Parse excluded databases
+    excluded_databases = None
+    if args.exclude_databases:
+        excluded_databases = [db.strip() for db in args.exclude_databases.split(',')]
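+    # Example (illustrative): --exclude-databases "analytics_tmp, staging"
+    # strips whitespace around each name and adds both to the default set.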
 
-    generator = PostgresReportGenerator(args.prometheus_url, args.postgres_sink_url)
+    generator = PostgresReportGenerator(args.prometheus_url, args.postgres_sink_url, excluded_databases)
 
     # Test connection
     if not generator.test_connection():
diff --git a/tests/reporter/test_generators_unit.py b/tests/reporter/test_generators_unit.py
index 4d6d209..0c3f5e1 100644
--- a/tests/reporter/test_generators_unit.py
+++ b/tests/reporter/test_generators_unit.py
@@ -133,20 +133,27 @@ def test_generate_a002_version_report(
     }
 
     def fake_query(query: str) -> dict[str, Any]:
-        for key, val in values.items():
-            if f'setting_name="{key}"' in query:
-                return {
-                    "status": "success",
-                    "data": {
-                        "result": [
-                            {
-                                "metric": {
-                                    "setting_value": val,
-                                }
-                            }
-                        ]
-                    },
-                }
+        # A002 uses a helper that queries both settings via a single regex selector.
+        if 'setting_name=~"server_version|server_version_num"' in query:
+            return {
+                "status": "success",
+                "data": {
+                    "result": [
+                        {
+                            "metric": {
+                                "setting_name": "server_version",
+                                "setting_value": values["server_version"],
+                            }
+                        },
+                        {
+                            "metric": {
+                                "setting_name": "server_version_num",
+                                "setting_value": values["server_version_num"],
+                            }
+                        },
+                    ]
+                },
+            }
         return {"status": "success", "data": {"result": []}}
 
     monkeypatch.setattr(generator, "query_instant", fake_query)
@@ -309,6 +316,18 @@ def test_generate_a007_altered_settings_report(monkeypatch: pytest.MonkeyPatch,
 @pytest.mark.unit
 def test_generate_a007_altered_settings_report(monkeypatch: pytest.MonkeyPatch, generator: PostgresReportGenerator) -> None:
     def fake_query(query: str) -> dict[str, Any]:
+        # Handle version info query from _get_postgres_version_info
+        if 'setting_name=~"server_version|server_version_num"' in query:
+            return {
+                "status": "success",
+                "data": {
+                    "result": [
+                        {"metric": {"setting_name": "server_version", "setting_value": "15.0"}},
+                        {"metric": {"setting_name": "server_version_num", "setting_value": "150000"}},
+                    ]
+                },
+            }
+        # Handle altered settings query
         assert "pgwatch_settings_is_default" in query
         return {
             "status": "success",
@@ -340,6 +359,7 @@ def fake_query(query: str) -> dict[str, Any]:
 
     data = payload["results"]["node-1"]["data"]
     assert set(data.keys()) == {"work_mem", "autovacuum"}
+    assert "postgres_version" in payload["results"]["node-1"]  # postgres_version is at node level
     assert data["work_mem"]["pretty_value"] == "1 MB"
     assert data["autovacuum"]["pretty_value"] == "off"
@@ -781,6 +801,8 @@ def fake_make_request(api_url, endpoint, request_data):
 
 @pytest.mark.unit
 def test_main_runs_specific_check_without_upload(monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]) -> None:
     class DummyGenerator:
+        DEFAULT_EXCLUDED_DATABASES = {'template0', 'template1', 'rdsadmin', 'azure_maintenance', 'cloudsqladmin'}
+
         def __init__(self, *args, **kwargs):
             self.closed = False
@@ -824,6 +846,8 @@ def close_postgres_sink(self):
 
 @pytest.mark.unit
 def test_main_exits_when_connection_fails(monkeypatch: pytest.MonkeyPatch) -> None:
     class FailingGenerator:
+        DEFAULT_EXCLUDED_DATABASES = {'template0', 'template1', 'rdsadmin', 'azure_maintenance', 'cloudsqladmin'}
+
         def __init__(self, *args, **kwargs):
             pass