From 4ce1014dfe2d295a17ca1d1d22fe349deb0941a8 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 10 Apr 2026 15:35:21 -0700 Subject: [PATCH 1/2] fix: [#691] harden `sql_explain` and `altimate_core_validate` input handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `sql_explain` and `altimate_core_validate` produced high failure rates and unhelpful errors because their input validation did not match the parameter contracts they advertised. **`sql_explain`** - Reject empty, whitespace-only, and placeholder-only SQL (`?`, `:var`, `$1`) before hitting the warehouse. Verbatim driver errors like "SQL compilation error: syntax error line 1 at position 8 unexpected ?." are unrecoverable for LLM callers — catch the common cases early with actionable guidance. - Reject empty / placeholder warehouse names (`?`, `:wh`, `""`) with a pointer to `warehouse_list` rather than letting the Registry emit "Connection ? not found. Available: (none)". - Introduce `buildExplainPlan()` that returns `{prefix, actuallyAnalyzed}` so the handler can pick dialect-correct syntax AND report the true plan mode. Fixes the previous lie where Snowflake users who asked for ANALYZE saw `analyzed: true` in metadata even though we silently sent plain `EXPLAIN USING TEXT`. - Warehouse coverage is now explicit: Snowflake (`EXPLAIN USING TEXT`), PostgreSQL (`EXPLAIN (ANALYZE, BUFFERS)`), Redshift (plain `EXPLAIN` — does NOT support ANALYZE), MySQL / MariaDB / DuckDB (`EXPLAIN ANALYZE`), Databricks / Spark (`EXPLAIN FORMATTED`), ClickHouse (`EXPLAIN`). BigQuery, Oracle, and SQL Server return an empty prefix and a clear "not supported via statement prefix — this warehouse needs a different plan mechanism" error instead of issuing a broken statement. 
- New `translateExplainError()` helper rewrites common driver errors into actionable messages: connection-not-found becomes "Available warehouses: a, b, c", unsubstituted-`?` compile errors become "inline the literal value", permission denials call out role grants, etc. - Intentional non-change: we do NOT scan mid-query for stray `?` because PostgreSQL JSONB uses `?`, `?|`, `?&` as legitimate operators. Only the bare-placeholder check runs, which has no false positives. **`altimate_core_validate`** - Remove the hard-gate that returned "No schema provided..." before the engine ran. The parameter schema already declared `schema_path` and `schema_context` as optional, and the Rust core (`altimate-core`) has accepted empty schemas for a long time via the existing `schemaOrEmpty` helper — the hard-gate was gratuitous and every call without schema failed identically with no recovery path. - When schema is absent, the engine still runs so syntax / dialect findings come through, and the tool surfaces a clear warning in the output and `has_schema: false` in metadata so callers can distinguish full validation from schema-less runs. Title becomes `Validate: VALID (no schema)` for unambiguous signaling. - Errors thrown by the engine are now classified as `error_class: engine_failure` to keep them distinct from user-input problems. **Tests** - New `test/altimate/tools/sql-explain.test.ts`: input validation (empty, placeholder, short), warehouse-name validation, full `buildExplainPlan` matrix (including the Redshift / BigQuery / Oracle regressions above), error translation paths, and Tool.execute integration with a mocked dispatcher. - New `test/altimate/tools/altimate-core-validate.test.ts`: schema resolution (none / `schema_context` / `schema_path` / empty object), validation error classification, output formatting, and a regression guard asserting the dispatcher is called even when no schema is given. 
- Updated `test/altimate/tool-error-propagation.test.ts` to reflect the new contract: the validate tool now runs the engine without schema and returns `success: true` with a warning, instead of early-returning an error. The regression fixture keeps telemetry coverage. Closes #691 Co-Authored-By: Claude Opus 4.6 (1M context) --- .../altimate/native/connections/register.ts | 177 ++++++- .../altimate/tools/altimate-core-validate.ts | 37 +- .../src/altimate/tools/sql-explain.ts | 137 +++++- .../altimate/tool-error-propagation.test.ts | 20 +- .../tools/altimate-core-validate.test.ts | 286 +++++++++++ .../test/altimate/tools/sql-explain.test.ts | 450 ++++++++++++++++++ 6 files changed, 1070 insertions(+), 37 deletions(-) create mode 100644 packages/opencode/test/altimate/tools/altimate-core-validate.test.ts create mode 100644 packages/opencode/test/altimate/tools/sql-explain.test.ts diff --git a/packages/opencode/src/altimate/native/connections/register.ts b/packages/opencode/src/altimate/native/connections/register.ts index 867ebd6d3..d5800c8c2 100644 --- a/packages/opencode/src/altimate/native/connections/register.ts +++ b/packages/opencode/src/altimate/native/connections/register.ts @@ -182,6 +182,147 @@ function getWarehouseType(warehouseName?: string): string { return Registry.getConfig(warehouseName)?.type ?? "unknown" } +/** + * Dialect-aware EXPLAIN plan describing how a warehouse should be asked for + * a query plan. + * + * - `prefix`: the statement prefix to prepend to the SQL ("" means this + * warehouse cannot be EXPLAIN'd via a simple statement prefix — the + * handler will return a clear error instead of issuing a broken query) + * - `actuallyAnalyzed`: whether the prefix actually runs the query and + * reports runtime stats. The caller requested `analyze=true`, but some + * warehouses silently downgrade to plan-only (e.g. Snowflake), so we + * reflect the true mode back to the user in the result envelope. 
+ */ +export interface ExplainPlan { + prefix: string + actuallyAnalyzed: boolean +} + +/** + * Build a dialect-aware EXPLAIN plan for the given warehouse type. + * + * Warehouse-specific notes: + * - Snowflake: `EXPLAIN USING TEXT`. No ANALYZE variant — silently downgraded. + * - PostgreSQL: `EXPLAIN` or `EXPLAIN (ANALYZE, BUFFERS)` for runtime stats. + * - Redshift: plain `EXPLAIN` only. Redshift does NOT support ANALYZE. + * - MySQL / MariaDB: `EXPLAIN` or `EXPLAIN ANALYZE` (MySQL 8+). + * - DuckDB: `EXPLAIN` or `EXPLAIN ANALYZE`. + * - Databricks / Spark: `EXPLAIN` or `EXPLAIN FORMATTED`. + * - ClickHouse: `EXPLAIN` (no ANALYZE form accepted via statement prefix). + * - BigQuery: uses a dry-run API instead of any EXPLAIN statement. Not + * supported via this code path — return empty prefix. + * - Oracle: `EXPLAIN PLAN FOR` stores the plan in PLAN_TABLE rather than + * returning it, so a bare prefix does not produce output. Not supported. + * - SQL Server: requires `SET SHOWPLAN_TEXT ON` as a session setting. + * Not supported via statement prefix. + */ +export function buildExplainPlan(warehouseType: string | undefined, analyze: boolean): ExplainPlan { + const type = (warehouseType ?? "").toLowerCase() + switch (type) { + case "snowflake": + // Snowflake: no ANALYZE; USING TEXT returns a readable plan. + return { prefix: "EXPLAIN USING TEXT", actuallyAnalyzed: false } + case "postgres": + case "postgresql": + return analyze + ? { prefix: "EXPLAIN (ANALYZE, BUFFERS)", actuallyAnalyzed: true } + : { prefix: "EXPLAIN", actuallyAnalyzed: false } + case "redshift": + // Redshift supports only plain EXPLAIN — no ANALYZE/BUFFERS. + return { prefix: "EXPLAIN", actuallyAnalyzed: false } + case "mysql": + case "mariadb": + // MySQL 8+ supports `EXPLAIN ANALYZE`. Older versions will reject it + // at the warehouse and the error will be surfaced to the caller. + return analyze + ? 
{ prefix: "EXPLAIN ANALYZE", actuallyAnalyzed: true } + : { prefix: "EXPLAIN", actuallyAnalyzed: false } + case "duckdb": + return analyze + ? { prefix: "EXPLAIN ANALYZE", actuallyAnalyzed: true } + : { prefix: "EXPLAIN", actuallyAnalyzed: false } + case "databricks": + case "spark": + // Databricks/Spark `EXPLAIN FORMATTED` returns a more detailed plan but + // does not actually execute the query — still a plan-only mode. + return { prefix: analyze ? "EXPLAIN FORMATTED" : "EXPLAIN", actuallyAnalyzed: false } + case "clickhouse": + return { prefix: "EXPLAIN", actuallyAnalyzed: false } + case "bigquery": + // BigQuery has no EXPLAIN statement — the correct answer is a dry-run + // job via the BigQuery API, which this tool does not support today. + return { prefix: "", actuallyAnalyzed: false } + case "oracle": + // Oracle's `EXPLAIN PLAN FOR` stores the plan in PLAN_TABLE and returns + // no rows, so a statement-prefix approach does not work. Callers would + // need a follow-up `SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY)`. + return { prefix: "", actuallyAnalyzed: false } + case "mssql": + case "sqlserver": + // SQL Server requires `SET SHOWPLAN_TEXT ON` as a session setting, + // not a statement prefix. Not supported via this code path. + return { prefix: "", actuallyAnalyzed: false } + default: + // Unknown warehouse — fall back to plain EXPLAIN and let the driver + // surface a real error if it does not understand the syntax. + return analyze + ? { prefix: "EXPLAIN ANALYZE", actuallyAnalyzed: true } + : { prefix: "EXPLAIN", actuallyAnalyzed: false } + } +} + +/** @deprecated Use buildExplainPlan for richer metadata about the plan mode. */ +export function buildExplainPrefix(warehouseType: string | undefined, analyze: boolean): string { + return buildExplainPlan(warehouseType, analyze).prefix +} + +/** + * Translate a raw warehouse/Registry error into an actionable message. + * + * Verbatim driver errors like "Connection ? not found. 
Available: (none)" are + * useless to an LLM caller that has no way to fix them. Detect common patterns + * and rewrite them with concrete next steps. + */ +export function translateExplainError( + raw: unknown, + warehouseName: string | undefined, + availableWarehouses: string[], +): string { + const msg = raw instanceof Error ? raw.message : String(raw) + + // Connection/warehouse lookup failures. + if (/Connection\s+.+\s+not found/i.test(msg) || /warehouse\s+.+\s+not found/i.test(msg)) { + if (availableWarehouses.length > 0) { + return `Warehouse ${JSON.stringify(warehouseName ?? "")} is not configured. Available warehouses: ${availableWarehouses.join(", ")}. Pass one of these as the 'warehouse' parameter, or omit it to use the default.` + } + return "No warehouses are configured. Run `warehouse_add` to set one up before calling sql_explain." + } + + // Unsubstituted-placeholder compilation errors from Snowflake / PG / MySQL. + if (/syntax error.*unexpected\s*\?/i.test(msg) || /syntax error.*position\s+\d+.*\?/i.test(msg)) { + return "SQL compilation error: the query contains an unsubstituted `?` placeholder. sql_explain does not support parameterized queries — inline the literal value before calling." + } + + // Snowflake-specific: ANALYZE not supported. + if (/EXPLAIN\s+ANALYZE/i.test(msg) && /not\s+supported/i.test(msg)) { + return "This warehouse does not support EXPLAIN ANALYZE. Retry with analyze=false to get a plan-only EXPLAIN." + } + + // Permission denials. + if (/permission\s+denied/i.test(msg) || /access\s+denied/i.test(msg) || /insufficient\s+privilege/i.test(msg)) { + return `The warehouse user lacks permission to EXPLAIN this query. Check role grants for the objects referenced in the SQL. Original error: ${msg}` + } + + // Generic SQL compilation error fallback. + if (/SQL compilation error/i.test(msg) || /syntax error/i.test(msg)) { + return `SQL compilation error in the query passed to sql_explain. Fix the SQL and retry. 
Original error: ${msg}` + } + + // Fall through: return the original but with a hint to aid debugging. + return msg +} + /** Register all connection-related handlers. Exported for test re-registration. */ export function registerAll(): void { @@ -261,10 +402,11 @@ register("sql.execute", async (params: SqlExecuteParams): Promise => { + let warehouseName: string | undefined + let warehouseType: string | undefined try { - const warehouseName = params.warehouse + warehouseName = params.warehouse let connector - let warehouseType: string | undefined if (warehouseName) { connector = await Registry.get(warehouseName) @@ -276,13 +418,25 @@ register("sql.explain", async (params: SqlExplainParams): Promise String(r[0])).join("\n") const planRows = result.rows.map((r, i) => ({ @@ -295,13 +449,18 @@ register("sql.explain", async (params: SqlExplainParams): Promise w.name) return { success: false, plan_rows: [], - error: String(e), + error: translateExplainError(e, warehouseName, available), + warehouse_type: warehouseType, analyzed: params.analyze ?? false, } } diff --git a/packages/opencode/src/altimate/tools/altimate-core-validate.ts b/packages/opencode/src/altimate/tools/altimate-core-validate.ts index 384b5faed..df64a7934 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-validate.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-validate.ts @@ -5,24 +5,14 @@ import type { Telemetry } from "../telemetry" export const AltimateCoreValidateTool = Tool.define("altimate_core_validate", { description: - "Validate SQL syntax and schema references. Checks if tables/columns exist in the schema and if SQL is valid for the target dialect. IMPORTANT: Provide schema_context or schema_path — without schema, all table/column references will report as 'not found'.", + "Validate SQL syntax and schema references. Checks if tables/columns exist in the schema and if SQL is valid for the target dialect. 
If no schema_path or schema_context is provided, validation still runs but schema-dependent checks (table/column existence) are skipped — syntax and dialect checks still apply. For full validation, run `schema_inspect` first on the referenced tables or pass `schema_context` inline.", parameters: z.object({ sql: z.string().describe("SQL query to validate"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), - async execute(args, ctx) { + async execute(args, _ctx) { const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) - const noSchema = !hasSchema - if (noSchema) { - const error = - "No schema provided. Provide schema_context or schema_path so table/column references can be resolved." - return { - title: "Validate: NO SCHEMA", - metadata: { success: false, valid: false, has_schema: false, error }, - output: `Error: ${error}`, - } - } try { const result = await Dispatcher.call("altimate_core.validate", { sql: args.sql, @@ -38,7 +28,7 @@ export const AltimateCoreValidateTool = Tool.define("altimate_core_validate", { })) // altimate_change end return { - title: `Validate: ${data.valid ? "VALID" : "INVALID"}`, + title: `Validate: ${data.valid ? "VALID" : "INVALID"}${hasSchema ? "" : " (no schema)"}`, metadata: { success: true, // engine ran — validation errors are findings, not failures valid: data.valid, @@ -46,19 +36,26 @@ export const AltimateCoreValidateTool = Tool.define("altimate_core_validate", { ...(error && { error }), ...(findings.length > 0 && { findings }), }, - output: formatValidate(data), + output: formatValidate(data, hasSchema), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Validate: ERROR", - metadata: { success: false, valid: false, has_schema: hasSchema, error: msg }, + metadata: { success: false, valid: false, has_schema: hasSchema, error: msg, error_class: "engine_failure" }, output: `Failed: ${msg}`, } } }, }) +// Exported for unit tests. +export const _altimateCoreValidateInternal = { + extractValidationErrors, + classifyValidationError, + formatValidate, +} + function extractValidationErrors(data: Record): string | undefined { if (Array.isArray(data.errors) && data.errors.length > 0) { const msgs = data.errors.map((e: any) => e.message ?? String(e)).filter(Boolean) @@ -77,14 +74,20 @@ function classifyValidationError(message: string): string { return "validation_error" } -function formatValidate(data: Record): string { +function formatValidate(data: Record, hasSchema: boolean): string { if (data.error) return `Error: ${data.error}` - if (data.valid) return "SQL is valid." + const schemaNote = hasSchema + ? "" + : "\n\nNote: No schema was provided, so table/column existence checks were skipped. " + + "To catch missing tables or columns, run `schema_inspect` on the referenced tables first " + + "or pass `schema_context` inline with {tables: {...}}." + if (data.valid) return `SQL is valid.${schemaNote}` const lines = ["Validation failed:\n"] for (const err of data.errors ?? 
[]) { lines.push(` • ${err.message}`) if (err.location) lines.push(` at line ${err.location.line}`) } + if (schemaNote) lines.push(schemaNote) return lines.join("\n") } diff --git a/packages/opencode/src/altimate/tools/sql-explain.ts b/packages/opencode/src/altimate/tools/sql-explain.ts index 0786be6a2..5e9bae7db 100644 --- a/packages/opencode/src/altimate/tools/sql-explain.ts +++ b/packages/opencode/src/altimate/tools/sql-explain.ts @@ -3,19 +3,127 @@ import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" import type { SqlExplainResult } from "../native/types" +/** + * Detect SQL input that cannot be meaningfully EXPLAIN'd. + * + * `sql_explain` does not support parameterized queries — the warehouse will + * reject a bare `?`, `:name`, or `$1` because nothing substitutes the bind + * value. Users of this tool must inline their values. + * + * Returns an error string to surface to the caller, or `null` when the SQL + * passes the pre-flight checks. + */ +function validateSqlInput(sql: unknown): string | null { + if (typeof sql !== "string") { + return "sql must be a string" + } + const trimmed = sql.trim() + if (trimmed.length === 0) { + return "sql is empty — pass the full query text to run EXPLAIN against" + } + // Detect queries that consist only of placeholder tokens. These come from + // LLMs that generated a parameterized query (e.g. `SELECT ... WHERE id = ?`) + // and dropped the surrounding SQL, or from templating mistakes. This check + // must run before the length check so a bare `?` is classified correctly. + // + // We intentionally do NOT scan mid-query for stray `?` characters because + // PostgreSQL JSONB uses `?`, `?|`, `?&` as legitimate "key exists" operators + // (e.g. `SELECT * FROM t WHERE data ? 'key'`). Flagging those would break + // real queries. Callers who pass unsubstituted binds inside an otherwise + // complete statement will get a translated error from the warehouse. 
+ if (/^[?$:@][\w]*$/.test(trimmed)) { + return ( + "sql contains only a placeholder token (" + + JSON.stringify(trimmed) + + "). sql_explain does not support parameterized queries — inline the actual values into the SQL text." + ) + } + if (trimmed.length < 6) { + // "SELECT" is 6 chars; anything shorter cannot be a real EXPLAIN-able query + return `sql is too short to be a valid query: ${JSON.stringify(trimmed)}` + } + return null +} + +/** + * Validate a warehouse name supplied by the caller. + * + * Empty strings and placeholder tokens slip through the optional-parameter + * contract and produce unhelpful "Connection X not found" errors from the + * Registry. Catch these here with a pointer to `warehouse_list`. + */ +function validateWarehouseName(warehouse: string | undefined): string | null { + if (warehouse === undefined) return null + if (typeof warehouse !== "string") { + return "warehouse must be a string" + } + const trimmed = warehouse.trim() + if (trimmed.length === 0) { + return "warehouse is an empty string — omit the parameter to use the default warehouse, or pass a configured connection name" + } + if (/^[?$:@]/.test(trimmed)) { + return ( + "warehouse name looks like an unsubstituted placeholder (" + + JSON.stringify(trimmed) + + "). Use `warehouse_list` to see configured warehouses." + ) + } + return null +} + export const SqlExplainTool = Tool.define("sql_explain", { description: - "Run EXPLAIN on a SQL query to get the execution plan. Shows how the database engine will process the query — useful for diagnosing slow queries, identifying full table scans, and understanding join strategies. Requires a warehouse connection.", + "Run EXPLAIN on a SQL query to get the execution plan. Shows how the database engine will process the query — useful for diagnosing slow queries, identifying full table scans, and understanding join strategies. Requires a warehouse connection. 
IMPORTANT: inline all values into the SQL text — parameterized queries with `?`, `:name`, or `$1` placeholders are not supported.", parameters: z.object({ - sql: z.string().describe("SQL query to explain"), - warehouse: z.string().optional().describe("Warehouse connection name"), + sql: z + .string() + .describe( + "SQL query to explain. Must be fully materialized — no bind placeholders like `?`, `:name`, or `$1`.", + ), + warehouse: z + .string() + .optional() + .describe("Warehouse connection name. Omit to use the first configured warehouse."), analyze: z .boolean() .optional() .default(false) - .describe("Run EXPLAIN ANALYZE (actually executes the query, slower but more accurate)"), + .describe( + "Run EXPLAIN ANALYZE (actually executes the query, slower but more accurate). Not supported by Snowflake.", + ), }), - async execute(args, ctx) { + async execute(args, _ctx) { + // Pre-flight validation — reject bad input before hitting the warehouse + // so we return an actionable message instead of a verbatim DB error. + const sqlError = validateSqlInput(args.sql) + if (sqlError) { + return { + title: "Explain: INVALID INPUT", + metadata: { + success: false, + analyzed: false, + warehouse_type: "unknown", + error: sqlError, + error_class: "input_validation", + }, + output: `Invalid input: ${sqlError}`, + } + } + const warehouseError = validateWarehouseName(args.warehouse) + if (warehouseError) { + return { + title: "Explain: INVALID INPUT", + metadata: { + success: false, + analyzed: false, + warehouse_type: "unknown", + error: warehouseError, + error_class: "input_validation", + }, + output: `Invalid input: ${warehouseError}`, + } + } + try { const result = await Dispatcher.call("sql.explain", { sql: args.sql, @@ -27,14 +135,23 @@ export const SqlExplainTool = Tool.define("sql_explain", { const error = result.error ?? "Unknown error" return { title: "Explain: FAILED", - metadata: { success: false, analyzed: false, warehouse_type: result.warehouse_type ?? 
"unknown", error }, + metadata: { + success: false, + analyzed: false, + warehouse_type: result.warehouse_type ?? "unknown", + error, + }, output: `Failed to get execution plan: ${error}`, } } return { title: `Explain: ${result.analyzed ? "ANALYZE" : "PLAN"} [${result.warehouse_type ?? "unknown"}]`, - metadata: { success: true, analyzed: result.analyzed, warehouse_type: result.warehouse_type ?? "unknown" }, + metadata: { + success: true, + analyzed: result.analyzed, + warehouse_type: result.warehouse_type ?? "unknown", + }, output: formatPlan(result), } } catch (e) { @@ -48,6 +165,12 @@ export const SqlExplainTool = Tool.define("sql_explain", { }, }) +// Exported for unit tests. +export const _sqlExplainInternal = { + validateSqlInput, + validateWarehouseName, +} + function formatPlan(result: SqlExplainResult): string { const lines: string[] = [] lines.push(`Warehouse type: ${result.warehouse_type}`) diff --git a/packages/opencode/test/altimate/tool-error-propagation.test.ts b/packages/opencode/test/altimate/tool-error-propagation.test.ts index 9270440b6..1a59449e0 100644 --- a/packages/opencode/test/altimate/tool-error-propagation.test.ts +++ b/packages/opencode/test/altimate/tool-error-propagation.test.ts @@ -52,14 +52,26 @@ function telemetryWouldExtract(metadata: Record): string { describe("altimate_core_validate error propagation", () => { beforeEach(() => Dispatcher.reset()) - test("returns early with clear error when no schema provided", async () => { + test("runs engine and warns in output when no schema provided", async () => { + // Previous behavior hard-gated on missing schema and returned an error before + // calling the engine, leading to a terse unrecoverable failure. The new + // contract dispatches to the engine with an empty schema and surfaces a + // warning in the output so schema-less syntax/dialect checks still run. 
+ Dispatcher.register("altimate_core.validate" as any, async () => ({ + success: true, + data: { valid: true, errors: [] }, + })) + const { AltimateCoreValidateTool } = await import("../../src/altimate/tools/altimate-core-validate") const tool = await AltimateCoreValidateTool.init() const result = await tool.execute({ sql: "SELECT * FROM users" }, stubCtx()) - expect(result.metadata.success).toBe(false) - expect(result.metadata.error).toContain("No schema provided") - expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + // The engine ran (success=true), schema-less mode is flagged, and the + // output warns the caller that existence checks were skipped. + expect(result.metadata.success).toBe(true) + expect(result.metadata.has_schema).toBe(false) + expect(result.title).toContain("no schema") + expect(String(result.output)).toContain("No schema was provided") }) test("surfaces errors when schema provided but table missing from schema", async () => { diff --git a/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts b/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts new file mode 100644 index 000000000..827044736 --- /dev/null +++ b/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts @@ -0,0 +1,286 @@ +/** + * Tests for AltimateCoreValidateTool — schema handling and error classification. + * + * The hardening here is about removing a contract mismatch: the parameter + * schema declares `schema_path` and `schema_context` as optional, but the + * previous implementation hard-gated on their absence and returned a terse + * error. The fix lets the tool run with an empty schema, surfaces a clear + * warning in the output, and still returns engine findings from the Rust + * core (syntax/dialect checks that do not need a schema). 
+ */ +import { describe, test, expect, spyOn, afterEach, beforeEach } from "bun:test" +import * as Dispatcher from "../../../src/altimate/native/dispatcher" +import { + AltimateCoreValidateTool, + _altimateCoreValidateInternal as toolInternals, +} from "../../../src/altimate/tools/altimate-core-validate" +import { SessionID, MessageID } from "../../../src/session/schema" + +beforeEach(() => { + process.env.ALTIMATE_TELEMETRY_DISABLED = "true" +}) + +afterEach(() => { + delete process.env.ALTIMATE_TELEMETRY_DISABLED +}) + +const ctx = { + sessionID: SessionID.make("ses_test"), + messageID: MessageID.make("msg_test"), + callID: "call_test", + agent: "build", + abort: AbortSignal.any([]), + messages: [], + metadata: () => {}, + ask: async () => {}, +} + +// --------------------------------------------------------------------------- +// Pure helpers +// --------------------------------------------------------------------------- + +describe("classifyValidationError", () => { + const { classifyValidationError } = toolInternals + + test("classifies missing column", () => { + expect(classifyValidationError("column 'foo' not found in users")).toBe("missing_column") + expect(classifyValidationError("Column bar not found")).toBe("missing_column") + }) + + test("classifies missing table", () => { + expect(classifyValidationError("table 'orders' not found")).toBe("missing_table") + }) + + test("column check takes precedence over table check", () => { + // "column X not found in table Y" should classify as missing_column, + // not missing_table, because the column is what is actually absent. 
+ expect(classifyValidationError("column id not found in table users")).toBe("missing_column") + }) + + test("classifies syntax errors", () => { + expect(classifyValidationError("syntax error near SELECT")).toBe("syntax_error") + }) + + test("classifies type mismatches", () => { + expect(classifyValidationError("type mismatch: expected INTEGER got VARCHAR")).toBe("type_mismatch") + }) + + test("falls back to generic validation_error", () => { + expect(classifyValidationError("some unusual semantic issue")).toBe("validation_error") + }) +}) + +describe("extractValidationErrors", () => { + const { extractValidationErrors } = toolInternals + + test("joins multiple error messages with semicolons", () => { + expect( + extractValidationErrors({ + errors: [{ message: "missing column foo" }, { message: "missing column bar" }], + }), + ).toBe("missing column foo; missing column bar") + }) + + test("returns undefined when errors array is empty", () => { + expect(extractValidationErrors({ errors: [] })).toBeUndefined() + }) + + test("returns undefined when errors field is missing", () => { + expect(extractValidationErrors({})).toBeUndefined() + }) + + test("handles non-object error entries", () => { + expect(extractValidationErrors({ errors: ["raw string error"] })).toBe("raw string error") + }) +}) + +describe("formatValidate", () => { + const { formatValidate } = toolInternals + + test("returns 'SQL is valid.' 
for successful validation with schema", () => { + expect(formatValidate({ valid: true }, true)).toBe("SQL is valid.") + }) + + test("includes schema warning for successful validation without schema", () => { + const result = formatValidate({ valid: true }, false) + expect(result).toContain("SQL is valid.") + expect(result).toContain("No schema was provided") + expect(result).toContain("schema_inspect") + }) + + test("formats validation failures with bullet list", () => { + const result = formatValidate( + { + valid: false, + errors: [ + { message: "missing column foo" }, + { message: "type mismatch in bar", location: { line: 5 } }, + ], + }, + true, + ) + expect(result).toContain("Validation failed") + expect(result).toContain("missing column foo") + expect(result).toContain("type mismatch in bar") + expect(result).toContain("at line 5") + }) + + test("appends schema note to failures when schema is missing", () => { + const result = formatValidate( + { valid: false, errors: [{ message: "syntax error" }] }, + false, + ) + expect(result).toContain("syntax error") + expect(result).toContain("No schema was provided") + }) +}) + +// --------------------------------------------------------------------------- +// Integration: Tool.execute with mocked Dispatcher +// --------------------------------------------------------------------------- + +describe("AltimateCoreValidateTool.execute", () => { + let dispatcherSpy: ReturnType | undefined + + afterEach(() => { + dispatcherSpy?.mockRestore() + dispatcherSpy = undefined + }) + + function mockDispatcher(response: unknown) { + dispatcherSpy?.mockRestore() + dispatcherSpy = spyOn(Dispatcher, "call").mockImplementation(async () => response as never) + } + + test("runs validation even when no schema is provided", async () => { + mockDispatcher({ + success: true, + data: { valid: true, errors: [] }, + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute({ sql: "SELECT 1" }, ctx as any) + + 
// The engine ran, so success should be true. + expect(result.metadata.success).toBe(true) + expect(result.metadata.has_schema).toBe(false) + expect(result.metadata.valid).toBe(true) + // Title should indicate schema-less mode. + expect(result.title).toContain("no schema") + // Output should warn about the schema gap. + expect(String(result.output)).toContain("No schema was provided") + }) + + test("runs full validation with inline schema_context", async () => { + mockDispatcher({ + success: true, + data: { valid: true, errors: [] }, + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute( + { + sql: "SELECT id FROM users", + schema_context: { + users: { id: "INTEGER", name: "VARCHAR" }, + }, + }, + ctx as any, + ) + + expect(result.metadata.success).toBe(true) + expect(result.metadata.has_schema).toBe(true) + expect(result.metadata.valid).toBe(true) + expect(result.title).not.toContain("no schema") + expect(String(result.output)).not.toContain("No schema was provided") + }) + + test("recognizes schema_path as schema source", async () => { + mockDispatcher({ + success: true, + data: { valid: true, errors: [] }, + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute( + { sql: "SELECT 1", schema_path: "/tmp/schema.yaml" }, + ctx as any, + ) + + expect(result.metadata.has_schema).toBe(true) + }) + + test("treats empty schema_context object as no schema", async () => { + mockDispatcher({ + success: true, + data: { valid: true, errors: [] }, + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute({ sql: "SELECT 1", schema_context: {} }, ctx as any) + + expect(result.metadata.has_schema).toBe(false) + }) + + test("returns invalid result with findings when engine finds errors", async () => { + mockDispatcher({ + success: true, + data: { + valid: false, + errors: [ + { message: "column xyz not found in users" }, + { message: "syntax error near FROOM" }, 
+ ], + }, + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute( + { + sql: "SELECT xyz FROOM users", + schema_context: { users: { id: "INTEGER" } }, + }, + ctx as any, + ) + + // Engine ran successfully, so success remains true even though SQL is invalid. + expect(result.metadata.success).toBe(true) + expect(result.metadata.valid).toBe(false) + expect(result.title).toContain("INVALID") + expect(result.metadata.findings).toBeDefined() + expect(Array.isArray(result.metadata.findings)).toBe(true) + // Two errors → two findings + const findings = result.metadata.findings as Array<{ category: string }> + expect(findings).toHaveLength(2) + expect(findings[0].category).toBe("missing_column") + expect(findings[1].category).toBe("syntax_error") + }) + + test("returns ERROR envelope when dispatcher throws", async () => { + dispatcherSpy = spyOn(Dispatcher, "call").mockImplementation(async () => { + throw new Error("core engine crashed") + }) + + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute({ sql: "SELECT 1" }, ctx as any) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error_class).toBe("engine_failure") + expect(result.title).toContain("ERROR") + expect(String(result.output)).toContain("core engine crashed") + }) + + test("does not early-return when schema is absent (regression)", async () => { + // Regression guard: the old implementation hard-gated on schema absence + // and never called the dispatcher, causing 266+ identical failures. 
+ const spy = spyOn(Dispatcher, "call").mockImplementation( + async () => ({ success: true, data: { valid: true, errors: [] } }) as never, + ) + + const tool = await AltimateCoreValidateTool.init() + await tool.execute({ sql: "SELECT 1" }, ctx as any) + + expect(spy).toHaveBeenCalledTimes(1) + spy.mockRestore() + }) +}) diff --git a/packages/opencode/test/altimate/tools/sql-explain.test.ts b/packages/opencode/test/altimate/tools/sql-explain.test.ts new file mode 100644 index 000000000..2ba3cf0b8 --- /dev/null +++ b/packages/opencode/test/altimate/tools/sql-explain.test.ts @@ -0,0 +1,450 @@ +/** + * Tests for SqlExplainTool — input validation and error translation. + * + * These tests cover the hardening around: + * 1. Rejecting empty/placeholder SQL before it hits the warehouse. + * 2. Rejecting empty/placeholder warehouse names. + * 3. Dialect-aware EXPLAIN prefix selection (unit tests on the helper). + * 4. Translating verbatim DB errors into actionable messages. + */ +import { describe, test, expect, spyOn, afterEach, beforeEach } from "bun:test" +import * as Dispatcher from "../../../src/altimate/native/dispatcher" +import { + SqlExplainTool, + _sqlExplainInternal as toolInternals, +} from "../../../src/altimate/tools/sql-explain" +import { + buildExplainPlan, + buildExplainPrefix, + translateExplainError, +} from "../../../src/altimate/native/connections/register" +import { SessionID, MessageID } from "../../../src/session/schema" + +beforeEach(() => { + process.env.ALTIMATE_TELEMETRY_DISABLED = "true" +}) + +afterEach(() => { + delete process.env.ALTIMATE_TELEMETRY_DISABLED +}) + +const ctx = { + sessionID: SessionID.make("ses_test"), + messageID: MessageID.make("msg_test"), + callID: "call_test", + agent: "build", + abort: AbortSignal.any([]), + messages: [], + metadata: () => {}, + ask: async () => {}, +} + +// --------------------------------------------------------------------------- +// Input validation helpers (pure functions) +// 
--------------------------------------------------------------------------- + +describe("validateSqlInput", () => { + const { validateSqlInput } = toolInternals + + test("accepts a normal SELECT query", () => { + expect(validateSqlInput("SELECT id FROM users WHERE id = 42")).toBeNull() + }) + + test("accepts multi-line SQL", () => { + expect( + validateSqlInput(` + SELECT u.id, u.name + FROM users u + JOIN orders o ON o.user_id = u.id + `), + ).toBeNull() + }) + + test("rejects non-string input", () => { + expect(validateSqlInput(123 as unknown)).toContain("must be a string") + expect(validateSqlInput(null as unknown)).toContain("must be a string") + expect(validateSqlInput(undefined as unknown)).toContain("must be a string") + }) + + test("rejects empty string", () => { + expect(validateSqlInput("")).toContain("sql is empty") + }) + + test("rejects whitespace-only string", () => { + expect(validateSqlInput(" \n\t ")).toContain("sql is empty") + }) + + test("rejects strings too short to be a query", () => { + const result = validateSqlInput("WHO") + expect(result).toContain("too short") + }) + + test("rejects bare `?` placeholder", () => { + const result = validateSqlInput("?") + expect(result).toContain("placeholder") + expect(result).toContain("does not support parameterized queries") + }) + + test("rejects bare `:name` placeholder", () => { + const result = validateSqlInput(":userid") + expect(result).toContain("placeholder") + }) + + test("rejects bare `$1` placeholder", () => { + const result = validateSqlInput("$1") + expect(result).toContain("placeholder") + }) + + test("allows a query with a mid-statement `?` — warehouse will reject if it's a bind placeholder", () => { + // We do NOT flag mid-query `?` because PostgreSQL JSONB uses `?`, `?|`, `?&` + // as legitimate "key exists" operators. 
Letting it through means a real + // Postgres query with JSONB operators still works, and a bad parameterized + // query still fails — just at the warehouse rather than at the validator. + expect(validateSqlInput("SELECT * FROM users WHERE id = ?")).toBeNull() + }) + + test("allows PostgreSQL JSONB `?` key-exists operator", () => { + expect( + validateSqlInput("SELECT id FROM users WHERE metadata ? 'email'"), + ).toBeNull() + }) + + test("allows `?` when it appears inside a string literal", () => { + // Quoted question marks are not bind placeholders + expect(validateSqlInput("SELECT 'hello?' FROM dual")).toBeNull() + }) +}) + +describe("validateWarehouseName", () => { + const { validateWarehouseName } = toolInternals + + test("accepts undefined (no warehouse requested)", () => { + expect(validateWarehouseName(undefined)).toBeNull() + }) + + test("accepts a normal connection name", () => { + expect(validateWarehouseName("snowflake_prod")).toBeNull() + }) + + test("rejects empty string", () => { + const result = validateWarehouseName("") + expect(result).toContain("empty string") + }) + + test("rejects whitespace-only string", () => { + const result = validateWarehouseName(" ") + expect(result).toContain("empty string") + }) + + test("rejects `?` placeholder", () => { + const result = validateWarehouseName("?") + expect(result).toContain("placeholder") + expect(result).toContain("warehouse_list") + }) + + test("rejects `:var` placeholder", () => { + expect(validateWarehouseName(":wh")).toContain("placeholder") + }) +}) + +// --------------------------------------------------------------------------- +// Dialect-aware EXPLAIN prefix +// --------------------------------------------------------------------------- + +describe("buildExplainPlan", () => { + test("Snowflake: EXPLAIN USING TEXT regardless of analyze — analyze silently downgrades", () => { + expect(buildExplainPlan("snowflake", false)).toEqual({ + prefix: "EXPLAIN USING TEXT", + actuallyAnalyzed: false, + }) 
+ // User requested analyze but Snowflake does not support it — the result + // must reflect the true mode (plan-only) so callers are not misled. + expect(buildExplainPlan("snowflake", true)).toEqual({ + prefix: "EXPLAIN USING TEXT", + actuallyAnalyzed: false, + }) + }) + + test("Postgres: plain EXPLAIN or EXPLAIN (ANALYZE, BUFFERS)", () => { + expect(buildExplainPlan("postgres", false)).toEqual({ + prefix: "EXPLAIN", + actuallyAnalyzed: false, + }) + expect(buildExplainPlan("postgres", true)).toEqual({ + prefix: "EXPLAIN (ANALYZE, BUFFERS)", + actuallyAnalyzed: true, + }) + // "postgresql" alias + expect(buildExplainPlan("postgresql", true).prefix).toBe("EXPLAIN (ANALYZE, BUFFERS)") + }) + + test("Redshift: plain EXPLAIN only — does NOT support ANALYZE", () => { + // Regression: earlier versions incorrectly grouped Redshift with Postgres + // and sent EXPLAIN (ANALYZE, BUFFERS), which Redshift rejects. + expect(buildExplainPlan("redshift", false)).toEqual({ + prefix: "EXPLAIN", + actuallyAnalyzed: false, + }) + expect(buildExplainPlan("redshift", true)).toEqual({ + prefix: "EXPLAIN", + actuallyAnalyzed: false, + }) + }) + + test("MySQL / MariaDB: plain EXPLAIN or EXPLAIN ANALYZE", () => { + expect(buildExplainPlan("mysql", false).prefix).toBe("EXPLAIN") + expect(buildExplainPlan("mysql", true).prefix).toBe("EXPLAIN ANALYZE") + expect(buildExplainPlan("mariadb", true).prefix).toBe("EXPLAIN ANALYZE") + }) + + test("DuckDB: plain EXPLAIN or EXPLAIN ANALYZE", () => { + expect(buildExplainPlan("duckdb", true)).toEqual({ + prefix: "EXPLAIN ANALYZE", + actuallyAnalyzed: true, + }) + }) + + test("Databricks / Spark: EXPLAIN or EXPLAIN FORMATTED (both plan-only)", () => { + // FORMATTED returns a richer plan but does not actually execute — so + // actuallyAnalyzed must stay false. 
+ expect(buildExplainPlan("databricks", false)).toEqual({ + prefix: "EXPLAIN", + actuallyAnalyzed: false, + }) + expect(buildExplainPlan("databricks", true)).toEqual({ + prefix: "EXPLAIN FORMATTED", + actuallyAnalyzed: false, + }) + expect(buildExplainPlan("spark", true).prefix).toBe("EXPLAIN FORMATTED") + }) + + test("ClickHouse: plain EXPLAIN", () => { + expect(buildExplainPlan("clickhouse", false).prefix).toBe("EXPLAIN") + }) + + test("BigQuery: not supported via statement prefix", () => { + // BigQuery requires a dry-run API call — no statement EXPLAIN. + expect(buildExplainPlan("bigquery", false).prefix).toBe("") + expect(buildExplainPlan("bigquery", true).prefix).toBe("") + }) + + test("Oracle: not supported via statement prefix", () => { + // Oracle's EXPLAIN PLAN FOR stores rows in PLAN_TABLE, producing no + // output directly — handler will short-circuit with a clear error. + expect(buildExplainPlan("oracle", false).prefix).toBe("") + }) + + test("SQL Server: not supported via statement prefix", () => { + expect(buildExplainPlan("mssql", false).prefix).toBe("") + expect(buildExplainPlan("sqlserver", true).prefix).toBe("") + }) + + test("Unknown warehouse falls back to plain EXPLAIN", () => { + expect(buildExplainPlan("exotic_db", false)).toEqual({ + prefix: "EXPLAIN", + actuallyAnalyzed: false, + }) + expect(buildExplainPlan(undefined, false).prefix).toBe("EXPLAIN") + }) + + test("Is case-insensitive", () => { + expect(buildExplainPlan("SNOWFLAKE", false).prefix).toBe("EXPLAIN USING TEXT") + expect(buildExplainPlan("PostGreSQL", true).prefix).toBe("EXPLAIN (ANALYZE, BUFFERS)") + }) +}) + +describe("buildExplainPrefix (legacy alias)", () => { + test("matches buildExplainPlan.prefix for the same inputs", () => { + const warehouses = ["snowflake", "postgres", "mysql", "redshift", "databricks", "bigquery"] + for (const wh of warehouses) { + for (const analyze of [false, true]) { + expect(buildExplainPrefix(wh, analyze)).toBe(buildExplainPlan(wh, 
analyze).prefix) + } + } + }) +}) + +// --------------------------------------------------------------------------- +// Error translation +// --------------------------------------------------------------------------- + +describe("translateExplainError", () => { + test("translates `Connection not found` into available-warehouse hint", () => { + const result = translateExplainError( + new Error("Connection ? not found. Available: (none)"), + "?", + ["prod_snowflake", "analytics_pg"], + ) + expect(result).toContain("not configured") + expect(result).toContain("prod_snowflake") + expect(result).toContain("analytics_pg") + }) + + test("tells user to run warehouse_add when none are configured", () => { + const result = translateExplainError( + new Error("Connection main not found"), + "main", + [], + ) + expect(result).toContain("No warehouses are configured") + expect(result).toContain("warehouse_add") + }) + + test("translates `?` placeholder compile errors", () => { + const result = translateExplainError( + new Error("OperationFailedError: SQL compilation error: syntax error line 1 at position 8 unexpected ?."), + "snowflake", + ["snowflake"], + ) + expect(result).toContain("unsubstituted `?` placeholder") + expect(result).toContain("inline the literal value") + }) + + test("translates EXPLAIN ANALYZE-not-supported errors", () => { + const result = translateExplainError( + new Error("EXPLAIN ANALYZE is not supported on this warehouse"), + "snowflake", + ["snowflake"], + ) + expect(result).toContain("does not support EXPLAIN ANALYZE") + expect(result).toContain("analyze=false") + }) + + test("translates permission errors", () => { + const result = translateExplainError( + new Error("permission denied for table sensitive_data"), + "snowflake", + ["snowflake"], + ) + expect(result).toContain("lacks permission") + expect(result).toContain("role grants") + }) + + test("translates generic SQL compilation errors", () => { + const result = translateExplainError( + new 
Error("SQL compilation error: unknown identifier 'nonsense'"), + "snowflake", + ["snowflake"], + ) + expect(result).toContain("SQL compilation error") + expect(result).toContain("Fix the SQL") + }) + + test("passes through truly unknown errors", () => { + const result = translateExplainError(new Error("some exotic network failure"), "wh", ["wh"]) + expect(result).toBe("some exotic network failure") + }) + + test("accepts non-Error values", () => { + const result = translateExplainError("raw string error", "wh", []) + expect(result).toBe("raw string error") + }) +}) + +// --------------------------------------------------------------------------- +// Integration: Tool.execute with mocked Dispatcher +// --------------------------------------------------------------------------- + +describe("SqlExplainTool.execute", () => { + let dispatcherSpy: ReturnType<typeof spyOn> | undefined + + afterEach(() => { + dispatcherSpy?.mockRestore() + dispatcherSpy = undefined + }) + + function mockDispatcher(response: unknown) { + dispatcherSpy?.mockRestore() + dispatcherSpy = spyOn(Dispatcher, "call").mockImplementation(async () => response as never) + } + + test("rejects empty sql before calling dispatcher", async () => { + const spy = spyOn(Dispatcher, "call") + const tool = await SqlExplainTool.init() + const result = await tool.execute({ sql: "", analyze: false }, ctx as any) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error_class).toBe("input_validation") + expect(result.title).toContain("INVALID INPUT") + expect(spy).not.toHaveBeenCalled() + spy.mockRestore() + }) + + test("rejects bare `?` sql before calling dispatcher", async () => { + const spy = spyOn(Dispatcher, "call") + const tool = await SqlExplainTool.init() + const result = await tool.execute({ sql: "?", analyze: false }, ctx as any) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error_class).toBe("input_validation") + expect(String(result.output)).toContain("placeholder") + 
expect(spy).not.toHaveBeenCalled() + spy.mockRestore() + }) + + test("rejects `?` warehouse name before calling dispatcher", async () => { + const spy = spyOn(Dispatcher, "call") + const tool = await SqlExplainTool.init() + const result = await tool.execute( + { sql: "SELECT 1 FROM dual", warehouse: "?", analyze: false }, + ctx as any, + ) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error_class).toBe("input_validation") + expect(String(result.output)).toContain("warehouse") + expect(spy).not.toHaveBeenCalled() + spy.mockRestore() + }) + + test("returns success when dispatcher reports success", async () => { + mockDispatcher({ + success: true, + plan_text: "Seq Scan on users", + plan_rows: [{ line: 1, text: "Seq Scan on users" }], + warehouse_type: "postgres", + analyzed: false, + }) + + const tool = await SqlExplainTool.init() + const result = await tool.execute({ sql: "SELECT * FROM users", analyze: false }, ctx as any) + + expect(result.metadata.success).toBe(true) + expect(result.metadata.warehouse_type).toBe("postgres") + expect(result.title).toContain("PLAN") + expect(String(result.output)).toContain("Seq Scan on users") + }) + + test("returns failure when dispatcher reports failure", async () => { + mockDispatcher({ + success: false, + plan_rows: [], + error: "Warehouse \"prod\" is not configured. 
Available warehouses: dev, staging.", + warehouse_type: undefined, + analyzed: false, + }) + + const tool = await SqlExplainTool.init() + const result = await tool.execute( + { sql: "SELECT * FROM users", warehouse: "prod", analyze: false }, + ctx as any, + ) + + expect(result.metadata.success).toBe(false) + expect(result.title).toContain("FAILED") + expect(String(result.output)).toContain("not configured") + }) + + test("returns ERROR title when dispatcher throws", async () => { + dispatcherSpy = spyOn(Dispatcher, "call").mockImplementation(async () => { + throw new Error("dispatcher exploded") + }) + + const tool = await SqlExplainTool.init() + const result = await tool.execute({ sql: "SELECT 1 FROM dual", analyze: false }, ctx as any) + + expect(result.metadata.success).toBe(false) + expect(result.title).toContain("ERROR") + expect(String(result.output)).toContain("dispatcher exploded") + }) +}) From 149f4d38858fe6502c46fa14d0f6df8cb914f925 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 10 Apr 2026 15:50:05 -0700 Subject: [PATCH 2/2] =?UTF-8?q?fix:=20address=20cubic=20review=20=E2=80=94?= =?UTF-8?q?=20avoid=20double=20blank=20line=20in=20validate=20output?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cubic flagged `altimate_core_validate`'s no-schema note: when the note was pushed into the `lines` array, its leading `\n\n` combined with the `\n` join produced three consecutive newlines in the rendered output. Fixed by storing the note without the leading blanks and inserting an explicit empty line at each call site. Also: - Extracted the warning to a module-level `NO_SCHEMA_NOTE` constant so the text lives in one place. - Added regression tests that assert the rendered output never contains three consecutive newlines for either the valid or the failure path. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- .../altimate/tools/altimate-core-validate.ts | 25 +++++++++++++------ .../tools/altimate-core-validate.test.ts | 20 +++++++++++++++ 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/packages/opencode/src/altimate/tools/altimate-core-validate.ts b/packages/opencode/src/altimate/tools/altimate-core-validate.ts index df64a7934..020ec7acb 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-validate.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-validate.ts @@ -74,20 +74,31 @@ function classifyValidationError(message: string): string { return "validation_error" } +// Warning appended when validation runs without a schema. Stored without a +// leading blank so it can be pushed into a line array; a blank line is added +// explicitly at each call site where spacing is desired. +const NO_SCHEMA_NOTE = + "Note: No schema was provided, so table/column existence checks were skipped. " + + "To catch missing tables or columns, run `schema_inspect` on the referenced tables first " + + "or pass `schema_context` inline with {tables: {...}}." + function formatValidate(data: Record<string, any>, hasSchema: boolean): string { if (data.error) return `Error: ${data.error}` - const schemaNote = hasSchema - ? "" - : "\n\nNote: No schema was provided, so table/column existence checks were skipped. " + - "To catch missing tables or columns, run `schema_inspect` on the referenced tables first " + - "or pass `schema_context` inline with {tables: {...}}." - if (data.valid) return `SQL is valid.${schemaNote}` + if (data.valid) { + return hasSchema ? "SQL is valid." : `SQL is valid.\n\n${NO_SCHEMA_NOTE}` + } const lines = ["Validation failed:\n"] for (const err of data.errors ?? 
[]) { lines.push(` • ${err.message}`) if (err.location) lines.push(` at line ${err.location.line}`) } - if (schemaNote) lines.push(schemaNote) + if (!hasSchema) { + // Blank separator line, then the note — avoids the double newline that + // would appear if the note itself started with `\n\n` and was joined + // with `\n`. + lines.push("") + lines.push(NO_SCHEMA_NOTE) + } return lines.join("\n") } diff --git a/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts b/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts index 827044736..418705491 100644 --- a/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts +++ b/packages/opencode/test/altimate/tools/altimate-core-validate.test.ts @@ -133,6 +133,26 @@ describe("formatValidate", () => { expect(result).toContain("syntax error") expect(result).toContain("No schema was provided") }) + + test("no double blank line between valid SQL and the schema note", () => { + // Guard: the schema note must add exactly one blank line after + // "SQL is valid." — the rendered output must never contain three + // consecutive newlines. + const result = formatValidate({ valid: true }, false) + expect(result).not.toContain("\n\n\n") + }) + + test("no extra blank line between failure list and the schema note", () => { + // Regression: pushing a note that starts with `\n\n` into the `lines` + // array and joining on `\n` produced three consecutive newlines. + const result = formatValidate( + { valid: false, errors: [{ message: "missing column foo" }] }, + false, + ) + expect(result).toContain("missing column foo") + expect(result).toContain("No schema was provided") + expect(result).not.toContain("\n\n\n") + }) }) // ---------------------------------------------------------------------------