From 5ff1f2898e246ffbb1b868bc70e0b8674321c071 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 16 Apr 2026 12:35:07 -0700 Subject: [PATCH 01/10] Tracing v1 --- .../api/copilot/api-keys/generate/route.ts | 6 +- apps/sim/app/api/copilot/api-keys/route.ts | 11 +- .../api/copilot/auto-allowed-tools/route.ts | 26 +- apps/sim/app/api/copilot/chat/abort/route.ts | 9 +- .../app/api/copilot/chat/stream/route.test.ts | 208 +- apps/sim/app/api/copilot/chat/stream/route.ts | 308 +- apps/sim/app/api/copilot/models/route.ts | 5 +- apps/sim/app/api/copilot/stats/route.ts | 5 +- apps/sim/app/api/mcp/copilot/route.ts | 688 ++-- apps/sim/instrumentation-node.ts | 181 +- apps/sim/lib/copilot/async-runs/repository.ts | 137 +- .../generated/mothership-stream-v1-schema.ts | 3001 ++++++++++------- .../copilot/generated/mothership-stream-v1.ts | 748 ++-- apps/sim/lib/copilot/request/go/fetch.test.ts | 91 + apps/sim/lib/copilot/request/go/fetch.ts | 121 + .../sim/lib/copilot/request/go/propagation.ts | 26 + .../sim/lib/copilot/request/go/stream.test.ts | 385 ++- apps/sim/lib/copilot/request/go/stream.ts | 362 +- apps/sim/lib/copilot/request/handlers/span.ts | 69 +- .../lib/copilot/request/lifecycle/finalize.ts | 37 +- .../request/lifecycle/headless.test.ts | 251 +- .../lib/copilot/request/lifecycle/headless.ts | 157 +- apps/sim/lib/copilot/request/lifecycle/run.ts | 512 +-- .../copilot/request/lifecycle/start.test.ts | 259 +- .../lib/copilot/request/lifecycle/start.ts | 509 +-- apps/sim/lib/copilot/request/otel.ts | 166 + .../copilot/request/session/contract.test.ts | 238 +- .../lib/copilot/request/session/contract.ts | 442 +-- .../copilot/request/session/explicit-abort.ts | 74 +- .../copilot/request/session/recovery.test.ts | 38 + .../lib/copilot/request/session/recovery.ts | 86 +- apps/sim/lib/copilot/request/subagent.ts | 199 +- .../sim/lib/copilot/request/tools/executor.ts | 113 + apps/sim/lib/copilot/request/trace.ts | 102 +- apps/sim/lib/copilot/request/types.ts | 2 + 
.../lib/copilot/tools/handlers/vfs.test.ts | 52 +- apps/sim/lib/copilot/tools/handlers/vfs.ts | 72 +- apps/sim/lib/copilot/vfs/file-reader.test.ts | 92 + apps/sim/lib/copilot/vfs/file-reader.ts | 155 +- apps/sim/providers/models.ts | 20 + 40 files changed, 6204 insertions(+), 3759 deletions(-) create mode 100644 apps/sim/lib/copilot/request/go/fetch.test.ts create mode 100644 apps/sim/lib/copilot/request/go/fetch.ts create mode 100644 apps/sim/lib/copilot/request/go/propagation.ts create mode 100644 apps/sim/lib/copilot/request/otel.ts create mode 100644 apps/sim/lib/copilot/request/session/recovery.test.ts create mode 100644 apps/sim/lib/copilot/vfs/file-reader.test.ts diff --git a/apps/sim/app/api/copilot/api-keys/generate/route.ts b/apps/sim/app/api/copilot/api-keys/generate/route.ts index 27971cede75..66febeed24f 100644 --- a/apps/sim/app/api/copilot/api-keys/generate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/generate/route.ts @@ -2,6 +2,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' const GenerateApiKeySchema = z.object({ @@ -32,13 +33,16 @@ export async function POST(req: NextRequest) { const { name } = validationResult.data - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/generate`, { + const res = await fetchGo(`${SIM_AGENT_API_URL}/api/validate-key/generate`, { method: 'POST', headers: { 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), }, body: JSON.stringify({ userId, name }), + spanName: 'sim → go /api/validate-key/generate', + operation: 'generate_api_key', + attributes: { 'user.id': userId }, }) if (!res.ok) { diff --git a/apps/sim/app/api/copilot/api-keys/route.ts b/apps/sim/app/api/copilot/api-keys/route.ts index 02d0d5be2b0..4b4d5862ba5 100644 --- a/apps/sim/app/api/copilot/api-keys/route.ts +++ b/apps/sim/app/api/copilot/api-keys/route.ts @@ -1,6 +1,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' export async function GET(request: NextRequest) { @@ -12,13 +13,16 @@ export async function GET(request: NextRequest) { const userId = session.user.id - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, { + const res = await fetchGo(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, { method: 'POST', headers: { 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, body: JSON.stringify({ userId }), + spanName: 'sim → go /api/validate-key/get-api-keys', + operation: 'get_api_keys', + attributes: { 'user.id': userId }, }) if (!res.ok) { @@ -66,13 +70,16 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: 'id is required' }, { status: 400 }) } - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, { + const res = await fetchGo(`${SIM_AGENT_API_URL}/api/validate-key/delete`, { method: 'POST', headers: { 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), }, body: JSON.stringify({ userId, apiKeyId: id }), + spanName: 'sim → go /api/validate-key/delete', + operation: 'delete_api_key', + attributes: { 'user.id': userId, 'api_key.id': id }, }) if (!res.ok) { diff --git a/apps/sim/app/api/copilot/auto-allowed-tools/route.ts b/apps/sim/app/api/copilot/auto-allowed-tools/route.ts index 61343d7541b..e02262d83ef 100644 --- a/apps/sim/app/api/copilot/auto-allowed-tools/route.ts +++ b/apps/sim/app/api/copilot/auto-allowed-tools/route.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' const logger = createLogger('CopilotAutoAllowedToolsAPI') @@ -30,9 +31,15 @@ export async function GET() { const userId = session.user.id - const res = await fetch( + const res = await fetchGo( `${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed?userId=${encodeURIComponent(userId)}`, - { method: 'GET', headers: copilotHeaders() } + { + method: 'GET', + headers: copilotHeaders(), + spanName: 'sim → go /api/tool-preferences/auto-allowed', + operation: 'list_auto_allowed_tools', + attributes: { 'user.id': userId }, + } ) if (!res.ok) { @@ -66,10 +73,13 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: 'toolId must be a string' }, { status: 400 }) } - const res = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, { + const res = await fetchGo(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, { method: 'POST', headers: copilotHeaders(), body: JSON.stringify({ userId, toolId: body.toolId }), + spanName: 'sim → go /api/tool-preferences/auto-allowed', + operation: 'add_auto_allowed_tool', + attributes: { 'user.id': userId, 'tool.id': body.toolId }, }) if (!res.ok) { @@ 
-107,9 +117,15 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: 'toolId query parameter is required' }, { status: 400 }) } - const res = await fetch( + const res = await fetchGo( `${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed?userId=${encodeURIComponent(userId)}&toolId=${encodeURIComponent(toolId)}`, - { method: 'DELETE', headers: copilotHeaders() } + { + method: 'DELETE', + headers: copilotHeaders(), + spanName: 'sim → go /api/tool-preferences/auto-allowed', + operation: 'remove_auto_allowed_tool', + attributes: { 'user.id': userId, 'tool.id': toolId }, + } ) if (!res.ok) { diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts b/apps/sim/app/api/copilot/chat/abort/route.ts index 375065eb418..4b836be8022 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { NextResponse } from 'next/server' import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/request/session' import { env } from '@/lib/core/config/env' @@ -54,7 +55,7 @@ export async function POST(request: Request) { () => controller.abort('timeout:go_explicit_abort_fetch'), GO_EXPLICIT_ABORT_TIMEOUT_MS ) - const response = await fetch(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { method: 'POST', headers, signal: controller.signal, @@ -63,6 +64,12 @@ export async function POST(request: Request) { userId: authenticatedUserId, ...(chatId ? 
{ chatId } : {}), }), + spanName: 'sim → go /api/streams/explicit-abort', + operation: 'explicit_abort', + attributes: { + 'copilot.stream.id': streamId, + ...(chatId ? { 'chat.id': chatId } : {}), + }, }).finally(() => clearTimeout(timeout)) if (!response.ok) { throw new Error(`Explicit abort marker request failed: ${response.status}`) diff --git a/apps/sim/app/api/copilot/chat/stream/route.test.ts b/apps/sim/app/api/copilot/chat/stream/route.test.ts index 3105f9216ce..8b7801754e1 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.test.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.test.ts @@ -2,12 +2,12 @@ * @vitest-environment node */ -import { NextRequest } from 'next/server' -import { beforeEach, describe, expect, it, vi } from 'vitest' +import { NextRequest } from "next/server"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from '@/lib/copilot/generated/mothership-stream-v1' +} from "@/lib/copilot/generated/mothership-stream-v1"; const { getLatestRunForStream, @@ -21,13 +21,13 @@ const { readFilePreviewSessions: vi.fn(), checkForReplayGap: vi.fn(), authenticateCopilotRequestSessionOnly: vi.fn(), -})) +})); -vi.mock('@/lib/copilot/async-runs/repository', () => ({ +vi.mock("@/lib/copilot/async-runs/repository", () => ({ getLatestRunForStream, -})) +})); -vi.mock('@/lib/copilot/request/session', () => ({ +vi.mock("@/lib/copilot/request/session", () => ({ readEvents, readFilePreviewSessions, checkForReplayGap, @@ -37,134 +37,180 @@ vi.mock('@/lib/copilot/request/session', () => ({ cursor: event.cursor, }, seq: event.seq, - trace: { requestId: event.requestId ?? '' }, + trace: { requestId: event.requestId ?? 
"" }, type: event.type, payload: event.payload, }), encodeSSEEnvelope: (event: Record) => new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`), SSE_RESPONSE_HEADERS: { - 'Content-Type': 'text/event-stream', + "Content-Type": "text/event-stream", }, -})) +})); -vi.mock('@/lib/copilot/request/http', () => ({ +vi.mock("@/lib/copilot/request/http", () => ({ authenticateCopilotRequestSessionOnly, -})) +})); -import { GET } from './route' +import { GET } from "./route"; async function readAllChunks(response: Response): Promise { - const reader = response.body?.getReader() - expect(reader).toBeTruthy() + const reader = response.body?.getReader(); + expect(reader).toBeTruthy(); - const chunks: string[] = [] + const chunks: string[] = []; while (true) { - const { done, value } = await reader!.read() + const { done, value } = await reader!.read(); if (done) { - break + break; } - chunks.push(new TextDecoder().decode(value)) + chunks.push(new TextDecoder().decode(value)); } - return chunks + return chunks; } -describe('copilot chat stream replay route', () => { +describe("copilot chat stream replay route", () => { beforeEach(() => { - vi.clearAllMocks() + vi.clearAllMocks(); authenticateCopilotRequestSessionOnly.mockResolvedValue({ - userId: 'user-1', + userId: "user-1", isAuthenticated: true, - }) - readEvents.mockResolvedValue([]) - readFilePreviewSessions.mockResolvedValue([]) - checkForReplayGap.mockResolvedValue(null) - }) + }); + readEvents.mockResolvedValue([]); + readFilePreviewSessions.mockResolvedValue([]); + checkForReplayGap.mockResolvedValue(null); + }); - it('returns preview sessions in batch mode', async () => { + it("returns preview sessions in batch mode", async () => { getLatestRunForStream.mockResolvedValue({ - status: 'active', - executionId: 'exec-1', - id: 'run-1', - }) + status: "active", + executionId: "exec-1", + id: "run-1", + }); readFilePreviewSessions.mockResolvedValue([ { schemaVersion: 1, - id: 'preview-1', - streamId: 'stream-1', - 
toolCallId: 'preview-1', - status: 'streaming', - fileName: 'draft.md', - previewText: 'hello', + id: "preview-1", + streamId: "stream-1", + toolCallId: "preview-1", + status: "streaming", + fileName: "draft.md", + previewText: "hello", previewVersion: 2, - updatedAt: '2026-04-10T00:00:00.000Z', + updatedAt: "2026-04-10T00:00:00.000Z", }, - ]) + ]); const response = await GET( new NextRequest( - 'http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0&batch=true' - ) - ) + "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0&batch=true", + ), + ); - expect(response.status).toBe(200) + expect(response.status).toBe(200); await expect(response.json()).resolves.toMatchObject({ success: true, previewSessions: [ expect.objectContaining({ - id: 'preview-1', - previewText: 'hello', + id: "preview-1", + previewText: "hello", previewVersion: 2, }), ], - status: 'active', - }) - }) + status: "active", + }); + }); - it('stops replay polling when run becomes cancelled', async () => { + it("stops replay polling when run becomes cancelled", async () => { getLatestRunForStream .mockResolvedValueOnce({ - status: 'active', - executionId: 'exec-1', - id: 'run-1', + status: "active", + executionId: "exec-1", + id: "run-1", }) .mockResolvedValueOnce({ - status: 'cancelled', - executionId: 'exec-1', - id: 'run-1', - }) + status: "cancelled", + executionId: "exec-1", + id: "run-1", + }); const response = await GET( - new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0') - ) + new NextRequest( + "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", + ), + ); - const chunks = await readAllChunks(response) - expect(chunks.join('')).toContain( + const chunks = await readAllChunks(response); + expect(chunks.join("")).toContain( JSON.stringify({ status: MothershipStreamV1CompletionStatus.cancelled, - reason: 'terminal_status', + reason: "terminal_status", + }), + ); + 
expect(getLatestRunForStream).toHaveBeenCalledTimes(2); + }); + + it("emits structured terminal replay error when run metadata disappears", async () => { + getLatestRunForStream + .mockResolvedValueOnce({ + status: "active", + executionId: "exec-1", + id: "run-1", }) - ) - expect(getLatestRunForStream).toHaveBeenCalledTimes(2) - }) + .mockResolvedValueOnce(null); - it('emits structured terminal replay error when run metadata disappears', async () => { + const response = await GET( + new NextRequest( + "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", + ), + ); + + const chunks = await readAllChunks(response); + const body = chunks.join(""); + expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`); + expect(body).toContain('"code":"resume_run_unavailable"'); + expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`); + }); + + it("uses the latest live request id for synthetic terminal replay events", async () => { getLatestRunForStream .mockResolvedValueOnce({ - status: 'active', - executionId: 'exec-1', - id: 'run-1', + status: "active", + executionId: "exec-1", + id: "run-1", }) - .mockResolvedValueOnce(null) + .mockResolvedValueOnce({ + status: "cancelled", + executionId: "exec-1", + id: "run-1", + }); + readEvents + .mockResolvedValueOnce([ + { + stream: { streamId: "stream-1", cursor: "1" }, + seq: 1, + trace: { requestId: "req-live-123" }, + type: MothershipStreamV1EventType.text, + payload: { + channel: "assistant", + text: "hello", + }, + }, + ]) + .mockResolvedValueOnce([]); const response = await GET( - new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0') - ) - - const chunks = await readAllChunks(response) - const body = chunks.join('') - expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`) - expect(body).toContain('"code":"resume_run_unavailable"') - expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`) - }) -}) + new 
NextRequest( + "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", + ), + ); + + const chunks = await readAllChunks(response); + const terminalChunk = chunks[chunks.length - 1] ?? ""; + expect(terminalChunk).toContain( + `"type":"${MothershipStreamV1EventType.complete}"`, + ); + expect(terminalChunk).toContain('"requestId":"req-live-123"'); + expect(terminalChunk).toContain('"status":"cancelled"'); + }); +}); diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index 5028ecf7e5e..ad6593cf381 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -1,11 +1,11 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' +import { createLogger } from "@sim/logger"; +import { type NextRequest, NextResponse } from "next/server"; +import { getLatestRunForStream } from "@/lib/copilot/async-runs/repository"; import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from '@/lib/copilot/generated/mothership-stream-v1' -import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import { authenticateCopilotRequestSessionOnly } from "@/lib/copilot/request/http"; import { checkForReplayGap, createEvent, @@ -13,36 +13,65 @@ import { readEvents, readFilePreviewSessions, SSE_RESPONSE_HEADERS, -} from '@/lib/copilot/request/session' -import { toStreamBatchEvent } from '@/lib/copilot/request/session/types' +} from "@/lib/copilot/request/session"; +import { toStreamBatchEvent } from "@/lib/copilot/request/session/types"; -export const maxDuration = 3600 +export const maxDuration = 3600; -const logger = createLogger('CopilotChatStreamAPI') -const POLL_INTERVAL_MS = 250 -const MAX_STREAM_MS = 60 * 60 * 1000 +const logger = 
createLogger("CopilotChatStreamAPI"); +const POLL_INTERVAL_MS = 250; +const MAX_STREAM_MS = 60 * 60 * 1000; + +function extractCanonicalRequestId(value: unknown): string { + return typeof value === "string" && value.length > 0 ? value : ""; +} + +function extractRunRequestId( + run: { requestContext?: unknown } | null | undefined, +): string { + if ( + !run || + typeof run.requestContext !== "object" || + run.requestContext === null + ) { + return ""; + } + const requestContext = run.requestContext as Record; + return ( + extractCanonicalRequestId(requestContext.requestId) || + extractCanonicalRequestId(requestContext.simRequestId) + ); +} + +function extractEnvelopeRequestId(envelope: { + trace?: { requestId?: unknown }; +}): string { + return extractCanonicalRequestId(envelope.trace?.requestId); +} function isTerminalStatus( - status: string | null | undefined + status: string | null | undefined, ): status is MothershipStreamV1CompletionStatus { return ( status === MothershipStreamV1CompletionStatus.complete || status === MothershipStreamV1CompletionStatus.error || status === MothershipStreamV1CompletionStatus.cancelled - ) + ); } function buildResumeTerminalEnvelopes(options: { - streamId: string - afterCursor: string - status: MothershipStreamV1CompletionStatus - message?: string - code: string - reason?: string + streamId: string; + afterCursor: string; + status: MothershipStreamV1CompletionStatus; + message?: string; + code: string; + reason?: string; + requestId?: string; }) { - const baseSeq = Number(options.afterCursor || '0') - const seq = Number.isFinite(baseSeq) ? baseSeq : 0 - const envelopes: ReturnType[] = [] + const baseSeq = Number(options.afterCursor || "0"); + const seq = Number.isFinite(baseSeq) ? baseSeq : 0; + const envelopes: ReturnType[] = []; + const rid = options.requestId ?? 
""; if (options.status === MothershipStreamV1CompletionStatus.error) { envelopes.push( @@ -50,14 +79,15 @@ function buildResumeTerminalEnvelopes(options: { streamId: options.streamId, cursor: String(seq + 1), seq: seq + 1, - requestId: '', + requestId: rid, type: MothershipStreamV1EventType.error, payload: { - message: options.message || 'Stream recovery failed before completion.', + message: + options.message || "Stream recovery failed before completion.", code: options.code, }, - }) - ) + }), + ); } envelopes.push( @@ -65,241 +95,259 @@ function buildResumeTerminalEnvelopes(options: { streamId: options.streamId, cursor: String(seq + envelopes.length + 1), seq: seq + envelopes.length + 1, - requestId: '', + requestId: rid, type: MothershipStreamV1EventType.complete, payload: { status: options.status, ...(options.reason ? { reason: options.reason } : {}), }, - }) - ) + }), + ); - return envelopes + return envelopes; } export async function GET(request: NextRequest) { const { userId: authenticatedUserId, isAuthenticated } = - await authenticateCopilotRequestSessionOnly() + await authenticateCopilotRequestSessionOnly(); if (!isAuthenticated || !authenticatedUserId) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); } - const url = new URL(request.url) - const streamId = url.searchParams.get('streamId') || '' - const afterCursor = url.searchParams.get('after') || '' - const batchMode = url.searchParams.get('batch') === 'true' + const url = new URL(request.url); + const streamId = url.searchParams.get("streamId") || ""; + const afterCursor = url.searchParams.get("after") || ""; + const batchMode = url.searchParams.get("batch") === "true"; if (!streamId) { - return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) + return NextResponse.json( + { error: "streamId is required" }, + { status: 400 }, + ); } - const run = await getLatestRunForStream(streamId, 
authenticatedUserId).catch((err) => { - logger.warn('Failed to fetch latest run for stream', { - streamId, - error: err instanceof Error ? err.message : String(err), - }) - return null - }) - logger.info('[Resume] Stream lookup', { + const run = await getLatestRunForStream(streamId, authenticatedUserId).catch( + (err) => { + logger.warn("Failed to fetch latest run for stream", { + streamId, + error: err instanceof Error ? err.message : String(err), + }); + return null; + }, + ); + logger.info("[Resume] Stream lookup", { streamId, afterCursor, batchMode, hasRun: !!run, runStatus: run?.status, - }) + }); if (!run) { - return NextResponse.json({ error: 'Stream not found' }, { status: 404 }) + return NextResponse.json({ error: "Stream not found" }, { status: 404 }); } if (batchMode) { - const afterSeq = afterCursor || '0' + const afterSeq = afterCursor || "0"; const [events, previewSessions] = await Promise.all([ readEvents(streamId, afterSeq), readFilePreviewSessions(streamId).catch((error) => { - logger.warn('Failed to read preview sessions for stream batch', { + logger.warn("Failed to read preview sessions for stream batch", { streamId, error: error instanceof Error ? 
error.message : String(error), - }) - return [] + }); + return []; }), - ]) - const batchEvents = events.map(toStreamBatchEvent) - logger.info('[Resume] Batch response', { + ]); + const batchEvents = events.map(toStreamBatchEvent); + logger.info("[Resume] Batch response", { streamId, afterCursor: afterSeq, eventCount: batchEvents.length, previewSessionCount: previewSessions.length, runStatus: run.status, - }) + }); return NextResponse.json({ success: true, events: batchEvents, previewSessions, status: run.status, - }) + }); } - const startTime = Date.now() + const startTime = Date.now(); const stream = new ReadableStream({ async start(controller) { - let cursor = afterCursor || '0' - let controllerClosed = false - let sawTerminalEvent = false + let cursor = afterCursor || "0"; + let controllerClosed = false; + let sawTerminalEvent = false; + let currentRequestId = extractRunRequestId(run); const closeController = () => { - if (controllerClosed) return - controllerClosed = true + if (controllerClosed) return; + controllerClosed = true; try { - controller.close() + controller.close(); } catch { // Controller already closed by runtime/client } - } + }; const enqueueEvent = (payload: unknown) => { - if (controllerClosed) return false + if (controllerClosed) return false; try { - controller.enqueue(encodeSSEEnvelope(payload)) - return true + controller.enqueue(encodeSSEEnvelope(payload)); + return true; } catch { - controllerClosed = true - return false + controllerClosed = true; + return false; } - } + }; const abortListener = () => { - controllerClosed = true - } - request.signal.addEventListener('abort', abortListener, { once: true }) + controllerClosed = true; + }; + request.signal.addEventListener("abort", abortListener, { once: true }); const flushEvents = async () => { - const events = await readEvents(streamId, cursor) + const events = await readEvents(streamId, cursor); if (events.length > 0) { - logger.info('[Resume] Flushing events', { + logger.info("[Resume] 
Flushing events", { streamId, afterCursor: cursor, eventCount: events.length, - }) + }); } for (const envelope of events) { - cursor = envelope.stream.cursor ?? String(envelope.seq) + cursor = envelope.stream.cursor ?? String(envelope.seq); + currentRequestId = + extractEnvelopeRequestId(envelope) || currentRequestId; if (envelope.type === MothershipStreamV1EventType.complete) { - sawTerminalEvent = true + sawTerminalEvent = true; } if (!enqueueEvent(envelope)) { - break + break; } } - } + }; const emitTerminalIfMissing = ( status: MothershipStreamV1CompletionStatus, - options?: { message?: string; code: string; reason?: string } + options?: { message?: string; code: string; reason?: string }, ) => { if (controllerClosed || sawTerminalEvent) { - return + return; } for (const envelope of buildResumeTerminalEnvelopes({ streamId, afterCursor: cursor, status, message: options?.message, - code: options?.code ?? 'resume_terminal', + code: options?.code ?? "resume_terminal", reason: options?.reason, + requestId: currentRequestId, })) { - cursor = envelope.stream.cursor ?? String(envelope.seq) + cursor = envelope.stream.cursor ?? String(envelope.seq); if (envelope.type === MothershipStreamV1EventType.complete) { - sawTerminalEvent = true + sawTerminalEvent = true; } if (!enqueueEvent(envelope)) { - break + break; } } - } + }; try { - const gap = await checkForReplayGap(streamId, afterCursor) + const gap = await checkForReplayGap( + streamId, + afterCursor, + currentRequestId, + ); if (gap) { for (const envelope of gap.envelopes) { - enqueueEvent(envelope) + enqueueEvent(envelope); } - return + return; } - await flushEvents() + await flushEvents(); while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) { - const currentRun = await getLatestRunForStream(streamId, authenticatedUserId).catch( - (err) => { - logger.warn('Failed to poll latest run for stream', { - streamId, - error: err instanceof Error ? 
err.message : String(err), - }) - return null - } - ) + const currentRun = await getLatestRunForStream( + streamId, + authenticatedUserId, + ).catch((err) => { + logger.warn("Failed to poll latest run for stream", { + streamId, + error: err instanceof Error ? err.message : String(err), + }); + return null; + }); if (!currentRun) { emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: 'The stream could not be recovered because its run metadata is unavailable.', - code: 'resume_run_unavailable', - reason: 'run_unavailable', - }) - break + message: + "The stream could not be recovered because its run metadata is unavailable.", + code: "resume_run_unavailable", + reason: "run_unavailable", + }); + break; } - await flushEvents() + currentRequestId = + extractRunRequestId(currentRun) || currentRequestId; + + await flushEvents(); if (controllerClosed) { - break + break; } if (isTerminalStatus(currentRun.status)) { emitTerminalIfMissing(currentRun.status, { message: currentRun.status === MothershipStreamV1CompletionStatus.error - ? typeof currentRun.error === 'string' + ? typeof currentRun.error === "string" ? currentRun.error - : 'The recovered stream ended with an error.' + : "The recovered stream ended with an error." 
: undefined, - code: 'resume_terminal_status', - reason: 'terminal_status', - }) - break + code: "resume_terminal_status", + reason: "terminal_status", + }); + break; } if (request.signal.aborted) { - controllerClosed = true - break + controllerClosed = true; + break; } - await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)) + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)); } if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) { emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: 'The stream recovery timed out before completion.', - code: 'resume_timeout', - reason: 'timeout', - }) + message: "The stream recovery timed out before completion.", + code: "resume_timeout", + reason: "timeout", + }); } } catch (error) { if (!controllerClosed && !request.signal.aborted) { - logger.warn('Stream replay failed', { + logger.warn("Stream replay failed", { streamId, error: error instanceof Error ? error.message : String(error), - }) + }); emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: 'The stream replay failed before completion.', - code: 'resume_internal', - reason: 'stream_replay_failed', - }) + message: "The stream replay failed before completion.", + code: "resume_internal", + reason: "stream_replay_failed", + }); } } finally { - request.signal.removeEventListener('abort', abortListener) - closeController() + request.signal.removeEventListener("abort", abortListener); + closeController(); } }, - }) + }); - return new Response(stream, { headers: SSE_RESPONSE_HEADERS }) + return new Response(stream, { headers: SSE_RESPONSE_HEADERS }); } diff --git a/apps/sim/app/api/copilot/models/route.ts b/apps/sim/app/api/copilot/models/route.ts index 7e23e38df69..8d5a61432a4 100644 --- a/apps/sim/app/api/copilot/models/route.ts +++ b/apps/sim/app/api/copilot/models/route.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 
'next/server' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' interface AvailableModel { @@ -43,10 +44,12 @@ export async function GET(_req: NextRequest) { } try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/get-available-models`, { + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/get-available-models`, { method: 'GET', headers, cache: 'no-store', + spanName: 'sim → go /api/get-available-models', + operation: 'get_available_models', }) const payload = await response.json().catch(() => ({})) diff --git a/apps/sim/app/api/copilot/stats/route.ts b/apps/sim/app/api/copilot/stats/route.ts index 75ed6d096b1..10d3520bde2 100644 --- a/apps/sim/app/api/copilot/stats/route.ts +++ b/apps/sim/app/api/copilot/stats/route.ts @@ -1,6 +1,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -39,13 +40,15 @@ export async function POST(req: NextRequest) { diffAccepted, } - const agentRes = await fetch(`${SIM_AGENT_API_URL}/api/stats`, { + const agentRes = await fetchGo(`${SIM_AGENT_API_URL}/api/stats`, { method: 'POST', headers: { 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), }, body: JSON.stringify(payload), + spanName: 'sim → go /api/stats', + operation: 'stats_ingest', }) // Prefer not to block clients; still relay status diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index f2bc6a2754f..877d24f75c0 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -1,5 +1,5 @@ -import { Server } from '@modelcontextprotocol/sdk/server/index.js' -import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js' +import { Server } from "@modelcontextprotocol/sdk/server/index.js"; +import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; import { CallToolRequestSchema, type CallToolResult, @@ -9,103 +9,122 @@ import { type ListToolsResult, McpError, type RequestId, -} from '@modelcontextprotocol/sdk/types.js' -import { db } from '@sim/db' -import { userStats } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { eq, sql } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' -import { validateOAuthAccessToken } from '@/lib/auth/oauth-token' -import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' -import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' -import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants' -import { runHeadlessCopilotLifecycle } from '@/lib/copilot/request/lifecycle/headless' -import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent' -import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor' -import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' -import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' -import { env } from '@/lib/core/config/env' -import { RateLimiter } from '@/lib/core/rate-limiter' 
-import { getBaseUrl } from '@/lib/core/utils/urls' -import { generateId } from '@/lib/core/utils/uuid' +} from "@modelcontextprotocol/sdk/types.js"; +import { db } from "@sim/db"; +import { userStats } from "@sim/db/schema"; +import { createLogger } from "@sim/logger"; +import { eq, sql } from "drizzle-orm"; +import { type NextRequest, NextResponse } from "next/server"; +import { validateOAuthAccessToken } from "@/lib/auth/oauth-token"; +import { getHighestPrioritySubscription } from "@/lib/billing/core/subscription"; +import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; +import { + ORCHESTRATION_TIMEOUT_MS, + SIM_AGENT_API_URL, +} from "@/lib/copilot/constants"; +import { createRequestId } from "@/lib/copilot/request/http"; +import { runHeadlessCopilotLifecycle } from "@/lib/copilot/request/lifecycle/headless"; +import { orchestrateSubagentStream } from "@/lib/copilot/request/subagent"; +import { + ensureHandlersRegistered, + executeTool, +} from "@/lib/copilot/tool-executor"; +import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; +import { + DIRECT_TOOL_DEFS, + SUBAGENT_TOOL_DEFS, +} from "@/lib/copilot/tools/mcp/definitions"; +import { env } from "@/lib/core/config/env"; +import { RateLimiter } from "@/lib/core/rate-limiter"; +import { getBaseUrl } from "@/lib/core/utils/urls"; +import { generateId } from "@/lib/core/utils/uuid"; import { authorizeWorkflowByWorkspacePermission, resolveWorkflowIdForUser, -} from '@/lib/workflows/utils' +} from "@/lib/workflows/utils"; -const logger = createLogger('CopilotMcpAPI') -const mcpRateLimiter = new RateLimiter() -const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6' +const logger = createLogger("CopilotMcpAPI"); +const mcpRateLimiter = new RateLimiter(); +const DEFAULT_COPILOT_MODEL = "claude-opus-4-6"; -export const dynamic = 'force-dynamic' -export const runtime = 'nodejs' -export const maxDuration = 3600 +export const dynamic = "force-dynamic"; +export const runtime = 
"nodejs"; +export const maxDuration = 3600; interface CopilotKeyAuthResult { - success: boolean - userId?: string - error?: string + success: boolean; + userId?: string; + error?: string; } /** * Validates a copilot API key by forwarding it to the Go copilot service's * `/api/validate-key` endpoint. Returns the associated userId on success. */ -async function authenticateCopilotApiKey(apiKey: string): Promise { +async function authenticateCopilotApiKey( + apiKey: string, +): Promise { try { - const internalSecret = env.INTERNAL_API_SECRET + const internalSecret = env.INTERNAL_API_SECRET; if (!internalSecret) { - logger.error('INTERNAL_API_SECRET not configured') - return { success: false, error: 'Server configuration error' } + logger.error("INTERNAL_API_SECRET not configured"); + return { success: false, error: "Server configuration error" }; } - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key`, { - method: 'POST', + const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); + const res = await fetchGo(`${SIM_AGENT_API_URL}/api/validate-key`, { + method: "POST", headers: { - 'Content-Type': 'application/json', - 'x-api-key': internalSecret, + "Content-Type": "application/json", + "x-api-key": internalSecret, }, body: JSON.stringify({ targetApiKey: apiKey }), signal: AbortSignal.timeout(10_000), - }) + spanName: "sim → go /api/validate-key (mcp)", + operation: "mcp_validate_key", + }); if (!res.ok) { - const body = await res.json().catch(() => null) - const upstream = (body as Record)?.message - const status = res.status + const body = await res.json().catch(() => null); + const upstream = (body as Record)?.message; + const status = res.status; if (status === 401 || status === 403) { return { success: false, error: `Invalid Copilot API key. Generate a new key in Settings → Copilot and set it in the x-api-key header.`, - } + }; } if (status === 402) { return { success: false, error: `Usage limit exceeded for this Copilot API key. 
Upgrade your plan or wait for your quota to reset.`, - } + }; } - return { success: false, error: String(upstream ?? 'Copilot API key validation failed') } + return { + success: false, + error: String(upstream ?? "Copilot API key validation failed"), + }; } - const data = (await res.json()) as { ok?: boolean; userId?: string } + const data = (await res.json()) as { ok?: boolean; userId?: string }; if (!data.ok || !data.userId) { return { success: false, - error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.', - } + error: + "Invalid Copilot API key. Generate a new key in Settings → Copilot.", + }; } - return { success: true, userId: data.userId } + return { success: true, userId: data.userId }; } catch (error) { - logger.error('Copilot API key validation failed', { error }) + logger.error("Copilot API key validation failed", { error }); return { success: false, error: - 'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.', - } + "Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.", + }; } } @@ -147,232 +166,252 @@ When the user refers to a workflow by name or description ("the email one", "my - Tools that operate on a specific workflow such as \`sim_workflow\`, \`sim_test\`, \`sim_deploy\`, and workflow-scoped \`sim_info\` requests require \`workflowId\`. - If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end. - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. 
-` +`; -type HeaderMap = Record +type HeaderMap = Record; -function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { +function createError( + id: RequestId, + code: ErrorCode | number, + message: string, +): JSONRPCError { return { - jsonrpc: '2.0', + jsonrpc: "2.0", id, error: { code, message }, - } + }; } function normalizeRequestHeaders(request: NextRequest): HeaderMap { - const headers: HeaderMap = {} + const headers: HeaderMap = {}; request.headers.forEach((value, key) => { - headers[key.toLowerCase()] = value - }) + headers[key.toLowerCase()] = value; + }); - return headers + return headers; } -function readHeader(headers: HeaderMap | undefined, name: string): string | undefined { - if (!headers) return undefined - const value = headers[name.toLowerCase()] +function readHeader( + headers: HeaderMap | undefined, + name: string, +): string | undefined { + if (!headers) return undefined; + const value = headers[name.toLowerCase()]; if (Array.isArray(value)) { - return value[0] + return value[0]; } - return value + return value; } class NextResponseCapture { - private _status = 200 - private _headers = new Headers() - private _controller: ReadableStreamDefaultController | null = null - private _pendingChunks: Uint8Array[] = [] - private _closeHandlers: Array<() => void> = [] - private _errorHandlers: Array<(error: Error) => void> = [] - private _headersWritten = false - private _ended = false - private _headersPromise: Promise - private _resolveHeaders: (() => void) | null = null - private _endedPromise: Promise - private _resolveEnded: (() => void) | null = null - readonly readable: ReadableStream + private _status = 200; + private _headers = new Headers(); + private _controller: ReadableStreamDefaultController | null = + null; + private _pendingChunks: Uint8Array[] = []; + private _closeHandlers: Array<() => void> = []; + private _errorHandlers: Array<(error: Error) => void> = []; + private _headersWritten = false; + private 
_ended = false; + private _headersPromise: Promise; + private _resolveHeaders: (() => void) | null = null; + private _endedPromise: Promise; + private _resolveEnded: (() => void) | null = null; + readonly readable: ReadableStream; constructor() { this._headersPromise = new Promise((resolve) => { - this._resolveHeaders = resolve - }) + this._resolveHeaders = resolve; + }); this._endedPromise = new Promise((resolve) => { - this._resolveEnded = resolve - }) + this._resolveEnded = resolve; + }); this.readable = new ReadableStream({ start: (controller) => { - this._controller = controller + this._controller = controller; if (this._pendingChunks.length > 0) { for (const chunk of this._pendingChunks) { - controller.enqueue(chunk) + controller.enqueue(chunk); } - this._pendingChunks = [] + this._pendingChunks = []; } }, cancel: () => { - this._ended = true - this._resolveEnded?.() - this.triggerCloseHandlers() + this._ended = true; + this._resolveEnded?.(); + this.triggerCloseHandlers(); }, - }) + }); } private markHeadersWritten(): void { - if (this._headersWritten) return - this._headersWritten = true - this._resolveHeaders?.() + if (this._headersWritten) return; + this._headersWritten = true; + this._resolveHeaders?.(); } private triggerCloseHandlers(): void { for (const handler of this._closeHandlers) { try { - handler() + handler(); } catch (error) { - this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + this.triggerErrorHandlers( + error instanceof Error ? 
error : new Error(String(error)), + ); } } } private triggerErrorHandlers(error: Error): void { for (const errorHandler of this._errorHandlers) { - errorHandler(error) + errorHandler(error); } } private normalizeChunk(chunk: unknown): Uint8Array | null { - if (typeof chunk === 'string') { - return new TextEncoder().encode(chunk) + if (typeof chunk === "string") { + return new TextEncoder().encode(chunk); } if (chunk instanceof Uint8Array) { - return chunk + return chunk; } if (chunk === undefined || chunk === null) { - return null + return null; } - return new TextEncoder().encode(String(chunk)) + return new TextEncoder().encode(String(chunk)); } - writeHead(status: number, headers?: Record): this { - this._status = status + writeHead( + status: number, + headers?: Record, + ): this { + this._status = status; if (headers) { Object.entries(headers).forEach(([key, value]) => { if (Array.isArray(value)) { - this._headers.set(key, value.join(', ')) + this._headers.set(key, value.join(", ")); } else { - this._headers.set(key, String(value)) + this._headers.set(key, String(value)); } - }) + }); } - this.markHeadersWritten() - return this + this.markHeadersWritten(); + return this; } flushHeaders(): this { - this.markHeadersWritten() - return this + this.markHeadersWritten(); + return this; } write(chunk: unknown): boolean { - const normalized = this.normalizeChunk(chunk) - if (!normalized) return true + const normalized = this.normalizeChunk(chunk); + if (!normalized) return true; - this.markHeadersWritten() + this.markHeadersWritten(); if (this._controller) { try { - this._controller.enqueue(normalized) + this._controller.enqueue(normalized); } catch (error) { - this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + this.triggerErrorHandlers( + error instanceof Error ? 
error : new Error(String(error)), + ); } } else { - this._pendingChunks.push(normalized) + this._pendingChunks.push(normalized); } - return true + return true; } end(chunk?: unknown): this { - if (chunk !== undefined) this.write(chunk) - this.markHeadersWritten() - if (this._ended) return this + if (chunk !== undefined) this.write(chunk); + this.markHeadersWritten(); + if (this._ended) return this; - this._ended = true - this._resolveEnded?.() + this._ended = true; + this._resolveEnded?.(); if (this._controller) { try { - this._controller.close() + this._controller.close(); } catch (error) { - this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + this.triggerErrorHandlers( + error instanceof Error ? error : new Error(String(error)), + ); } } - this.triggerCloseHandlers() + this.triggerCloseHandlers(); - return this + return this; } async waitForHeaders(timeoutMs = 30000): Promise { - if (this._headersWritten) return + if (this._headersWritten) return; await Promise.race([ this._headersPromise, new Promise((resolve) => { - setTimeout(resolve, timeoutMs) + setTimeout(resolve, timeoutMs); }), - ]) + ]); } async waitForEnd(timeoutMs = 30000): Promise { - if (this._ended) return + if (this._ended) return; await Promise.race([ this._endedPromise, new Promise((resolve) => { - setTimeout(resolve, timeoutMs) + setTimeout(resolve, timeoutMs); }), - ]) + ]); } - on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this { - if (event === 'close') { - this._closeHandlers.push(handler as () => void) + on( + event: "close" | "error", + handler: (() => void) | ((error: Error) => void), + ): this { + if (event === "close") { + this._closeHandlers.push(handler as () => void); } - if (event === 'error') { - this._errorHandlers.push(handler as (error: Error) => void) + if (event === "error") { + this._errorHandlers.push(handler as (error: Error) => void); } - return this + return this; } toNextResponse(): NextResponse { 
return new NextResponse(this.readable, { status: this._status, headers: this._headers, - }) + }); } } function buildMcpServer(abortSignal?: AbortSignal): Server { const server = new Server( { - name: 'sim-copilot', - version: '1.0.0', + name: "sim-copilot", + version: "1.0.0", }, { capabilities: { tools: {} }, instructions: MCP_SERVER_INSTRUCTIONS, - } - ) + }, + ); server.setRequestHandler(ListToolsRequestSchema, async () => { const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ @@ -380,100 +419,100 @@ function buildMcpServer(abortSignal?: AbortSignal): Server { description: tool.description, inputSchema: tool.inputSchema, ...(tool.annotations && { annotations: tool.annotations }), - })) + })); const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ name: tool.name, description: tool.description, inputSchema: tool.inputSchema, ...(tool.annotations && { annotations: tool.annotations }), - })) + })); const result: ListToolsResult = { tools: [...directTools, ...subagentTools], - } + }; - return result - }) + return result; + }); server.setRequestHandler(CallToolRequestSchema, async (request, extra) => { - const headers = (extra.requestInfo?.headers || {}) as HeaderMap - const apiKeyHeader = readHeader(headers, 'x-api-key') - const authorizationHeader = readHeader(headers, 'authorization') + const headers = (extra.requestInfo?.headers || {}) as HeaderMap; + const apiKeyHeader = readHeader(headers, "x-api-key"); + const authorizationHeader = readHeader(headers, "authorization"); - let authResult: CopilotKeyAuthResult = { success: false } + let authResult: CopilotKeyAuthResult = { success: false }; - if (authorizationHeader?.startsWith('Bearer ')) { - const token = authorizationHeader.slice(7) - const oauthResult = await validateOAuthAccessToken(token) + if (authorizationHeader?.startsWith("Bearer ")) { + const token = authorizationHeader.slice(7); + const oauthResult = await validateOAuthAccessToken(token); if (oauthResult.success && oauthResult.userId) { - if 
(!oauthResult.scopes?.includes('mcp:tools')) { + if (!oauthResult.scopes?.includes("mcp:tools")) { return { content: [ { - type: 'text' as const, + type: "text" as const, text: 'AUTHENTICATION ERROR: OAuth token is missing the required "mcp:tools" scope. Re-authorize with the correct scopes.', }, ], isError: true, - } + }; } - authResult = { success: true, userId: oauthResult.userId } + authResult = { success: true, userId: oauthResult.userId }; } else { return { content: [ { - type: 'text' as const, - text: `AUTHENTICATION ERROR: ${oauthResult.error ?? 'Invalid OAuth access token'} Do NOT retry — re-authorize via OAuth.`, + type: "text" as const, + text: `AUTHENTICATION ERROR: ${oauthResult.error ?? "Invalid OAuth access token"} Do NOT retry — re-authorize via OAuth.`, }, ], isError: true, - } + }; } } else if (apiKeyHeader) { - authResult = await authenticateCopilotApiKey(apiKeyHeader) + authResult = await authenticateCopilotApiKey(apiKeyHeader); } if (!authResult.success || !authResult.userId) { const errorMsg = apiKeyHeader ? `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.` - : 'AUTHENTICATION ERROR: No authentication provided. Provide a Bearer token (OAuth 2.1) or an x-api-key header. Generate a Copilot API key in Settings → Copilot.' - logger.warn('MCP copilot auth failed', { method: request.method }) + : "AUTHENTICATION ERROR: No authentication provided. Provide a Bearer token (OAuth 2.1) or an x-api-key header. 
Generate a Copilot API key in Settings → Copilot."; + logger.warn("MCP copilot auth failed", { method: request.method }); return { content: [ { - type: 'text' as const, + type: "text" as const, text: errorMsg, }, ], isError: true, - } + }; } const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription( authResult.userId, await getHighestPrioritySubscription(authResult.userId), - 'api-endpoint', - false - ) + "api-endpoint", + false, + ); if (!rateLimitResult.allowed) { return { content: [ { - type: 'text' as const, + type: "text" as const, text: `RATE LIMIT: Too many requests. Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`, }, ], isError: true, - } + }; } const params = request.params as | { name?: string; arguments?: Record } - | undefined + | undefined; if (!params?.name) { - throw new McpError(ErrorCode.InvalidParams, 'Tool name required') + throw new McpError(ErrorCode.InvalidParams, "Tool name required"); } const result = await handleToolsCall( @@ -482,46 +521,50 @@ function buildMcpServer(abortSignal?: AbortSignal): Server { arguments: params.arguments, }, authResult.userId, - abortSignal - ) + abortSignal, + ); - trackMcpCopilotCall(authResult.userId) + trackMcpCopilotCall(authResult.userId); - return result - }) + return result; + }); - return server + return server; } async function handleMcpRequestWithSdk( request: NextRequest, - parsedBody: unknown + parsedBody: unknown, ): Promise { - const server = buildMcpServer(request.signal) + const server = buildMcpServer(request.signal); const transport = new StreamableHTTPServerTransport({ sessionIdGenerator: undefined, enableJsonResponse: true, - }) + }); - const responseCapture = new NextResponseCapture() + const responseCapture = new NextResponseCapture(); const requestAdapter = { method: request.method, headers: normalizeRequestHeaders(request), - } + }; - await server.connect(transport) + await server.connect(transport); try { - await 
transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody) - await responseCapture.waitForHeaders() + await transport.handleRequest( + requestAdapter as any, + responseCapture as any, + parsedBody, + ); + await responseCapture.waitForHeaders(); // Must exceed the longest possible tool execution. // Using ORCHESTRATION_TIMEOUT_MS + 60 s buffer so the orchestrator can // finish or time-out on its own before the transport is torn down. - await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000) - return responseCapture.toNextResponse() + await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000); + return responseCapture.toNextResponse(); } finally { - await server.close().catch(() => {}) - await transport.close().catch(() => {}) + await server.close().catch(() => {}); + await transport.close().catch(() => {}); } } @@ -529,41 +572,48 @@ export async function GET() { // Return 405 to signal that server-initiated SSE notifications are not // supported. Without this, clients like mcp-remote will repeatedly // reconnect trying to open an SSE stream, flooding the logs with GETs. 
- return new NextResponse(null, { status: 405 }) + return new NextResponse(null, { status: 405 }); } export async function POST(request: NextRequest) { - const hasAuth = request.headers.has('authorization') || request.headers.has('x-api-key') + const hasAuth = + request.headers.has("authorization") || request.headers.has("x-api-key"); if (!hasAuth) { - const origin = getBaseUrl().replace(/\/$/, '') - const resourceMetadataUrl = `${origin}/.well-known/oauth-protected-resource/api/mcp/copilot` - return new NextResponse(JSON.stringify({ error: 'unauthorized' }), { + const origin = getBaseUrl().replace(/\/$/, ""); + const resourceMetadataUrl = `${origin}/.well-known/oauth-protected-resource/api/mcp/copilot`; + return new NextResponse(JSON.stringify({ error: "unauthorized" }), { status: 401, headers: { - 'WWW-Authenticate': `Bearer resource_metadata="${resourceMetadataUrl}", scope="mcp:tools"`, - 'Content-Type': 'application/json', + "WWW-Authenticate": `Bearer resource_metadata="${resourceMetadataUrl}", scope="mcp:tools"`, + "Content-Type": "application/json", }, - }) + }); } try { - let parsedBody: unknown + let parsedBody: unknown; try { - parsedBody = await request.json() + parsedBody = await request.json(); } catch { - return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), { - status: 400, - }) + return NextResponse.json( + createError(0, ErrorCode.ParseError, "Invalid JSON body"), + { + status: 400, + }, + ); } - return await handleMcpRequestWithSdk(request, parsedBody) + return await handleMcpRequestWithSdk(request, parsedBody); } catch (error) { - logger.error('Error handling MCP request', { error }) - return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { - status: 500, - }) + logger.error("Error handling MCP request", { error }); + return NextResponse.json( + createError(0, ErrorCode.InternalError, "Internal error"), + { + status: 500, + }, + ); } } @@ -571,18 +621,20 @@ export async function 
OPTIONS() { return new NextResponse(null, { status: 204, headers: { - 'Access-Control-Allow-Origin': '*', - 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS, DELETE', - 'Access-Control-Allow-Headers': - 'Content-Type, Authorization, X-API-Key, X-Requested-With, Accept', - 'Access-Control-Max-Age': '86400', + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "GET, POST, OPTIONS, DELETE", + "Access-Control-Allow-Headers": + "Content-Type, Authorization, X-API-Key, X-Requested-With, Accept", + "Access-Control-Max-Age": "86400", }, - }) + }); } export async function DELETE(request: NextRequest) { - void request - return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405 }) + void request; + return NextResponse.json(createError(0, -32000, "Method not allowed."), { + status: 405, + }); } /** @@ -597,73 +649,82 @@ function trackMcpCopilotCall(userId: string): void { .where(eq(userStats.userId, userId)) .then(() => {}) .catch((error) => { - logger.error('Failed to track MCP copilot call', { error, userId }) - }) + logger.error("Failed to track MCP copilot call", { error, userId }); + }); } async function handleToolsCall( params: { name: string; arguments?: Record }, userId: string, - abortSignal?: AbortSignal + abortSignal?: AbortSignal, ): Promise { - const args = params.arguments || {} + const args = params.arguments || {}; - const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name) + const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name); if (directTool) { - return handleDirectToolCall(directTool, args, userId) + return handleDirectToolCall(directTool, args, userId); } - const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) + const subagentTool = SUBAGENT_TOOL_DEFS.find( + (tool) => tool.name === params.name, + ); if (subagentTool) { - return handleSubagentToolCall(subagentTool, args, userId, abortSignal) + return handleSubagentToolCall(subagentTool, 
args, userId, abortSignal); } - throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`) + throw new McpError( + ErrorCode.MethodNotFound, + `Tool not found: ${params.name}`, + ); } async function handleDirectToolCall( toolDef: (typeof DIRECT_TOOL_DEFS)[number], args: Record, - userId: string + userId: string, ): Promise { try { const execContext = await prepareExecutionContext( userId, - (args.workflowId as string) || '', - (args.chatId as string) || undefined - ) + (args.workflowId as string) || "", + (args.chatId as string) || undefined, + ); const toolCall = { id: generateId(), name: toolDef.toolId, - status: 'pending' as const, + status: "pending" as const, params: args as Record, startTime: Date.now(), - } + }; - ensureHandlersRegistered() - const result = await executeTool(toolCall.name, toolCall.params || {}, execContext) + ensureHandlersRegistered(); + const result = await executeTool( + toolCall.name, + toolCall.params || {}, + execContext, + ); return { content: [ { - type: 'text', + type: "text", text: JSON.stringify(result.output ?? result, null, 2), }, ], isError: !result.success, - } + }; } catch (error) { - logger.error('Direct tool execution failed', { tool: toolDef.name, error }) + logger.error("Direct tool execution failed", { tool: toolDef.name, error }); return { content: [ { - type: 'text', + type: "text", text: `Tool execution failed: ${error instanceof Error ? 
error.message : String(error)}`, }, ], isError: true, - } + }; } } @@ -676,72 +737,89 @@ async function handleDirectToolCall( async function handleBuildToolCall( args: Record, userId: string, - abortSignal?: AbortSignal + abortSignal?: AbortSignal, ): Promise { try { - const requestText = (args.request as string) || JSON.stringify(args) - const workflowId = args.workflowId as string | undefined - let resolvedWorkflowName: string | undefined - let resolvedWorkspaceId: string | undefined + const requestText = (args.request as string) || JSON.stringify(args); + const workflowId = args.workflowId as string | undefined; + let resolvedWorkflowName: string | undefined; + let resolvedWorkspaceId: string | undefined; const resolved = workflowId ? await (async () => { const authorization = await authorizeWorkflowByWorkspacePermission({ workflowId, userId, - action: 'read', - }) - resolvedWorkflowName = authorization.workflow?.name || undefined - resolvedWorkspaceId = authorization.workflow?.workspaceId || undefined + action: "read", + }); + resolvedWorkflowName = authorization.workflow?.name || undefined; + resolvedWorkspaceId = + authorization.workflow?.workspaceId || undefined; return authorization.allowed - ? { status: 'resolved' as const, workflowId, workflowName: resolvedWorkflowName } - : { - status: 'not_found' as const, - message: 'workflowId is required for build. Call create_workflow first.', + ? { + status: "resolved" as const, + workflowId, + workflowName: resolvedWorkflowName, } + : { + status: "not_found" as const, + message: + "workflowId is required for build. 
Call create_workflow first.", + }; })() - : await resolveWorkflowIdForUser(userId) + : await resolveWorkflowIdForUser(userId); - if (resolved.status === 'resolved') { - resolvedWorkflowName ||= resolved.workflowName + if (resolved.status === "resolved") { + resolvedWorkflowName ||= resolved.workflowName; } - if (!resolved || resolved.status !== 'resolved') { + if (!resolved || resolved.status !== "resolved") { return { content: [ { - type: 'text', + type: "text", text: JSON.stringify( { success: false, error: resolved?.message ?? - 'workflowId is required for build. Call create_workflow first.', + "workflowId is required for build. Call create_workflow first.", }, null, - 2 + 2, ), }, ], isError: true, - } + }; } - const chatId = generateId() - const executionContext = await prepareExecutionContext(userId, resolved.workflowId, chatId, { - workspaceId: resolvedWorkspaceId, - }) - resolvedWorkspaceId = executionContext.workspaceId - let workspaceContext: string | undefined + const chatId = generateId(); + const executionContext = await prepareExecutionContext( + userId, + resolved.workflowId, + chatId, + { + workspaceId: resolvedWorkspaceId, + }, + ); + resolvedWorkspaceId = executionContext.workspaceId; + let workspaceContext: string | undefined; if (resolvedWorkspaceId) { try { - workspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) + workspaceContext = await generateWorkspaceContext( + resolvedWorkspaceId, + userId, + ); } catch (error) { - logger.warn('Failed to generate workspace context for build tool call', { - workflowId: resolved.workflowId, - workspaceId: resolvedWorkspaceId, - error: error instanceof Error ? error.message : String(error), - }) + logger.warn( + "Failed to generate workspace context for build tool call", + { + workflowId: resolved.workflowId, + workspaceId: resolvedWorkspaceId, + error: error instanceof Error ? 
error.message : String(error), + }, + ); } } @@ -753,47 +831,47 @@ async function handleBuildToolCall( ...(workspaceContext ? { workspaceContext } : {}), userId, model: DEFAULT_COPILOT_MODEL, - mode: 'agent', - commands: ['fast'], + mode: "agent", + commands: ["fast"], messageId: generateId(), chatId, - } + }; const result = await runHeadlessCopilotLifecycle(requestPayload, { userId, workflowId: resolved.workflowId, workspaceId: resolvedWorkspaceId, chatId, - goRoute: '/api/mcp', + goRoute: "/api/mcp", executionContext, autoExecuteTools: true, timeout: ORCHESTRATION_TIMEOUT_MS, interactive: false, abortSignal, - }) + }); const responseData = { success: result.success, content: result.content, toolCalls: result.toolCalls, error: result.error, - } + }; return { - content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }], + content: [{ type: "text", text: JSON.stringify(responseData, null, 2) }], isError: !result.success, - } + }; } catch (error) { - logger.error('Build tool call failed', { error }) + logger.error("Build tool call failed", { error }); return { content: [ { - type: 'text', + type: "text", text: `Build failed: ${error instanceof Error ? 
error.message : String(error)}`, }, ], isError: true, - } + }; } } @@ -801,10 +879,10 @@ async function handleSubagentToolCall( toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], args: Record, userId: string, - abortSignal?: AbortSignal + abortSignal?: AbortSignal, ): Promise { - if (toolDef.agentId === 'workflow') { - return handleBuildToolCall(args, userId, abortSignal) + if (toolDef.agentId === "workflow") { + return handleBuildToolCall(args, userId, abortSignal); } try { @@ -812,11 +890,12 @@ async function handleSubagentToolCall( (args.request as string) || (args.message as string) || (args.error as string) || - JSON.stringify(args) + JSON.stringify(args); + const simRequestId = createRequestId(); - const context = (args.context as Record) || {} + const context = (args.context as Record) || {}; if (args.plan && !context.plan) { - context.plan = args.plan + context.plan = args.plan; } const result = await orchestrateSubagentStream( @@ -828,17 +907,18 @@ async function handleSubagentToolCall( context, model: DEFAULT_COPILOT_MODEL, headless: true, - source: 'mcp', + source: "mcp", }, { userId, workflowId: args.workflowId as string | undefined, workspaceId: args.workspaceId as string | undefined, + simRequestId, abortSignal, - } - ) + }, + ); - let responseData: unknown + let responseData: unknown; if (result.structuredResult) { responseData = { @@ -846,44 +926,44 @@ async function handleSubagentToolCall( type: result.structuredResult.type, summary: result.structuredResult.summary, data: result.structuredResult.data, - } + }; } else if (result.error) { responseData = { success: false, error: result.error, errors: result.errors, - } + }; } else { responseData = { success: result.success, content: result.content, - } + }; } return { content: [ { - type: 'text', + type: "text", text: JSON.stringify(responseData, null, 2), }, ], isError: !result.success, - } + }; } catch (error) { - logger.error('Subagent tool call failed', { + logger.error("Subagent tool call failed", { 
tool: toolDef.name, agentId: toolDef.agentId, error, - }) + }); return { content: [ { - type: 'text', + type: "text", text: `Subagent call failed: ${error instanceof Error ? error.message : String(error)}`, }, ], isError: true, - } + }; } } diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index 0f6bd4352d7..ff89d5b5eea 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -1,10 +1,29 @@ /** * Sim OpenTelemetry - Server-side Instrumentation + * + * Mothership joint trace design + * ----------------------------- + * Both Sim (this file) and the Go copilot server register under a single + * OTel `service.name = "mothership"` so every request shows up as one + * service in the OTLP backend. To keep the two halves distinguishable: + * + * - Every span emitted by this process is prefixed with `sim: ` on + * start, and gets a `mothership.origin = "sim"` attribute. + * - The Go side does the same with `go: ` / `mothership.origin = "go"`. + * + * So in Jaeger/Tempo, filtering by `mothership.origin` (exact) or by + * operation name prefix (`sim:` / `go:`) cleanly splits the two halves. 
*/ import type { Attributes, Context, Link, SpanKind } from '@opentelemetry/api' -import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api' -import type { Sampler, SamplingResult } from '@opentelemetry/sdk-trace-base' +import { DiagConsoleLogger, DiagLogLevel, TraceFlags, diag, trace } from '@opentelemetry/api' +import type { + ReadableSpan, + Sampler, + SamplingResult, + Span, + SpanProcessor, +} from '@opentelemetry/sdk-trace-base' import { createLogger } from '@sim/logger' import { env } from './lib/core/config/env' @@ -12,9 +31,13 @@ diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) const logger = createLogger('OTelInstrumentation') +const MOTHERSHIP_ORIGIN = 'sim' as const +const SPAN_NAME_PREFIX = `${MOTHERSHIP_ORIGIN}: ` + const DEFAULT_TELEMETRY_CONFIG = { endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces', - serviceName: 'sim-studio', + // Joint Sim+Go service surface in Jaeger/Tempo. See header comment. + serviceName: 'mothership', serviceVersion: '0.1.0', serverSide: { enabled: true }, batchSettings: { @@ -26,28 +49,60 @@ const DEFAULT_TELEMETRY_CONFIG = { } /** - * Span name prefixes we want to KEEP + * Span name prefixes we keep after sampling. All spans we care about + * (copilot lifecycle, fetchGo Sim→Go calls, gen_ai.* root, workflow/block + * executions, etc.) start with one of these. Anything else is Next.js + * framework noise and gets dropped unless its parent is already sampled. 
*/ const ALLOWED_SPAN_PREFIXES = [ - 'platform.', // Our platform events - 'gen_ai.', // GenAI semantic convention spans - 'workflow.', // Workflow execution spans - 'block.', // Block execution spans - 'http.client.', // Our API block HTTP calls - 'function.', // Function block execution - 'router.', // Router block evaluation - 'condition.', // Condition block evaluation - 'loop.', // Loop block execution - 'parallel.', // Parallel block execution + 'platform.', + 'gen_ai.', + 'workflow.', + 'block.', + 'http.client.', + 'function.', + 'router.', + 'condition.', + 'loop.', + 'parallel.', + 'copilot.', + 'sim →', + 'sim.', ] function isBusinessSpan(spanName: string): boolean { return ALLOWED_SPAN_PREFIXES.some((prefix) => spanName.startsWith(prefix)) } +/** + * MothershipOriginSpanProcessor tags every span this process creates with + * `mothership.origin` and prepends a `sim: ` prefix to the span name on + * start, before any downstream processor (BatchSpanProcessor) reads it. + * + * Implemented as its own processor rather than a resource attribute so + * the backend span/operation list (which keys on span name) is visually + * split between sim and go even when both share service.name. 
+ */ +class MothershipOriginSpanProcessor implements SpanProcessor { + onStart(span: Span): void { + span.setAttribute('mothership.origin', MOTHERSHIP_ORIGIN) + const name = span.name + if (!name.startsWith(SPAN_NAME_PREFIX)) { + span.updateName(`${SPAN_NAME_PREFIX}${name}`) + } + } + onEnd(_span: ReadableSpan): void {} + shutdown(): Promise { + return Promise.resolve() + } + forceFlush(): Promise { + return Promise.resolve() + } +} + async function initializeOpenTelemetry() { try { - if (env.NEXT_TELEMETRY_DISABLED === '1') { + if (env.NEXT_TELEMETRY_DISABLED === '1' || process.env.NEXT_TELEMETRY_DISABLED === '1') { logger.info('OpenTelemetry disabled via NEXT_TELEMETRY_DISABLED=1') return } @@ -59,11 +114,28 @@ async function initializeOpenTelemetry() { telemetryConfig = DEFAULT_TELEMETRY_CONFIG } + // Prefer process.env directly: @t3-oss/env-nextjs sometimes returns + // undefined for server vars that aren't listed in experimental__runtimeEnv, + // and TELEMETRY_ENDPOINT isn't mapped there. 
+ const resolvedEndpoint = + process.env.TELEMETRY_ENDPOINT || env.TELEMETRY_ENDPOINT || telemetryConfig.endpoint + telemetryConfig = { + ...telemetryConfig, + endpoint: resolvedEndpoint, + serviceName: 'mothership', + } + if (telemetryConfig.serverSide?.enabled === false) { logger.info('Server-side OpenTelemetry disabled in config') return } + logger.info('OpenTelemetry init', { + endpoint: telemetryConfig.endpoint, + serviceName: telemetryConfig.serviceName, + origin: MOTHERSHIP_ORIGIN, + }) + const { NodeSDK } = await import('@opentelemetry/sdk-node') const { defaultResource, resourceFromAttributes } = await import('@opentelemetry/resources') const { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, ATTR_DEPLOYMENT_ENVIRONMENT } = await import( @@ -71,11 +143,19 @@ async function initializeOpenTelemetry() { ) const { OTLPTraceExporter } = await import('@opentelemetry/exporter-trace-otlp-http') const { BatchSpanProcessor } = await import('@opentelemetry/sdk-trace-node') - const { ParentBasedSampler, TraceIdRatioBasedSampler, SamplingDecision } = await import( + const { TraceIdRatioBasedSampler, SamplingDecision } = await import( '@opentelemetry/sdk-trace-base' ) - const createBusinessSpanSampler = (baseSampler: Sampler): Sampler => ({ + // Sampler responsibilities: + // 1. Drop Next.js framework spans (tagged with next.span_type). + // 2. If we're inside a sampled business trace (parent has SAMPLED), let + // the child record so the full trace stays together. + // 3. For a business-span ROOT, decide afresh with the ratio sampler — + // ignoring an unsampled Next.js HTTP parent. Delegating to + // ParentBasedSampler here would use its localParentNotSampled + // inner sampler (AlwaysOff by default) and veto every trace. 
+ const createBusinessSpanSampler = (rootRatioSampler: Sampler): Sampler => ({ shouldSample( context: Context, traceId: string, @@ -88,15 +168,30 @@ async function initializeOpenTelemetry() { return { decision: SamplingDecision.NOT_RECORD } } + const parentSpanContext = trace.getSpanContext(context) + const parentIsSampled = + !!parentSpanContext && + (parentSpanContext.traceFlags & TraceFlags.SAMPLED) === TraceFlags.SAMPLED + + if (parentIsSampled) { + return { decision: SamplingDecision.RECORD_AND_SAMPLED } + } + if (isBusinessSpan(spanName)) { - return baseSampler.shouldSample(context, traceId, spanName, spanKind, attributes, links) + return rootRatioSampler.shouldSample( + context, + traceId, + spanName, + spanKind, + attributes, + links + ) } return { decision: SamplingDecision.NOT_RECORD } }, - toString(): string { - return `BusinessSpanSampler{baseSampler=${baseSampler.toString()}}` + return `BusinessSpanSampler{rootSampler=${rootRatioSampler.toString()}}` }, }) @@ -107,6 +202,24 @@ async function initializeOpenTelemetry() { keepAlive: false, }) + // Surface export failures in the Sim log instead of letting + // BatchSpanProcessor silently drop them. 
+ const origExport = exporter.export.bind(exporter) + exporter.export = (spans, resultCallback) => { + origExport(spans, (result) => { + if (result?.code !== 0) { + // eslint-disable-next-line no-console + console.error('[OTEL] exporter export failed', { + endpoint: telemetryConfig.endpoint, + resultCode: result?.code, + error: result?.error?.message, + spanCount: spans.length, + }) + } + resultCallback(result) + }) + } + const batchProcessor = new BatchSpanProcessor(exporter, { maxQueueSize: telemetryConfig.batchSettings.maxQueueSize, maxExportBatchSize: telemetryConfig.batchSettings.maxExportBatchSize, @@ -119,23 +232,31 @@ async function initializeOpenTelemetry() { [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, [ATTR_SERVICE_VERSION]: telemetryConfig.serviceVersion, [ATTR_DEPLOYMENT_ENVIRONMENT]: env.NODE_ENV || 'development', - 'service.namespace': 'sim-ai-platform', + 'service.namespace': 'mothership', + 'mothership.origin': MOTHERSHIP_ORIGIN, 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.version': '1.0.0', }) ) - const baseSampler = new ParentBasedSampler({ - root: new TraceIdRatioBasedSampler(0.1), - }) - const sampler = createBusinessSpanSampler(baseSampler) + // Dev / self-hosted OTLP backends (Jaeger/Tempo on localhost) should + // capture every trace so manual verification is deterministic. Keep 10% + // for production cloud endpoints. + const isLocalEndpoint = /localhost|127\.0\.0\.1/i.test(telemetryConfig.endpoint) + const samplingRatio = isLocalEndpoint ? 1.0 : 0.1 + const rootRatioSampler = new TraceIdRatioBasedSampler(samplingRatio) + const sampler = createBusinessSpanSampler(rootRatioSampler) + + // Order matters: the origin-prefix processor must run BEFORE the batch + // processor so the renamed span and the mothership.origin attribute are + // captured on export. 
+ const spanProcessors: SpanProcessor[] = [new MothershipOriginSpanProcessor(), batchProcessor] const sdk = new NodeSDK({ resource, - spanProcessor: batchProcessor, + spanProcessors, sampler, - traceExporter: exporter, }) sdk.start() @@ -152,7 +273,11 @@ async function initializeOpenTelemetry() { process.on('SIGTERM', shutdownOtel) process.on('SIGINT', shutdownOtel) - logger.info('OpenTelemetry instrumentation initialized with business span filtering') + logger.info('OpenTelemetry instrumentation initialized', { + serviceName: telemetryConfig.serviceName, + origin: MOTHERSHIP_ORIGIN, + samplingRatio, + }) } catch (error) { logger.error('Failed to initialize OpenTelemetry instrumentation', error) } diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index c18d27cbbec..9fd082e72d7 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -1,3 +1,4 @@ +import { SpanStatusCode, trace } from '@opentelemetry/api' import { db } from '@sim/db' import { type CopilotAsyncToolStatus, @@ -16,6 +17,47 @@ import { } from './lifecycle' const logger = createLogger('CopilotAsyncRunsRepo') +// Resolve the tracer lazily per-call to avoid capturing the NoOp tracer +// before NodeSDK installs the global TracerProvider (Next.js 16/Turbopack +// can evaluate modules before instrumentation-node.ts finishes). +const getAsyncRunsTracer = () => trace.getTracer('sim-copilot-async-runs', '1.0.0') + +/** + * withDbSpan wraps an async DB operation in a client-kind OTel span with + * canonical `db.*` attributes so every async-runs call is visible in traces + * alongside the owning request. 
+ */ +async function withDbSpan( + name: string, + op: string, + table: string, + attrs: Record, + fn: () => Promise, +): Promise { + const span = getAsyncRunsTracer().startSpan(name, { + attributes: { + 'db.system': 'postgresql', + 'db.operation': op, + 'db.sql.table': table, + ...Object.fromEntries( + Object.entries(attrs).filter(([, v]) => v !== undefined), + ), + }, + }) + try { + const result = await fn() + return result + } catch (error) { + span.recordException(error instanceof Error ? error : new Error(String(error))) + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }) + throw error + } finally { + span.end() + } +} export interface CreateRunSegmentInput { id?: string @@ -34,26 +76,43 @@ export interface CreateRunSegmentInput { } export async function createRunSegment(input: CreateRunSegmentInput) { - const [run] = await db - .insert(copilotRuns) - .values({ - ...(input.id ? { id: input.id } : {}), - executionId: input.executionId, - parentRunId: input.parentRunId ?? null, - chatId: input.chatId, - userId: input.userId, - workflowId: input.workflowId ?? null, - workspaceId: input.workspaceId ?? null, - streamId: input.streamId, - agent: input.agent ?? null, - model: input.model ?? null, - provider: input.provider ?? null, - requestContext: input.requestContext ?? {}, - status: input.status ?? 'active', - }) - .returning() - - return run + return withDbSpan( + 'copilot.async_runs.create_run_segment', + 'INSERT', + 'copilot_runs', + { + 'copilot.execution_id': input.executionId, + 'copilot.chat_id': input.chatId, + 'copilot.stream_id': input.streamId, + 'copilot.user_id': input.userId, + 'copilot.run.parent_id': input.parentRunId ?? undefined, + 'copilot.run.agent': input.agent ?? undefined, + 'copilot.run.model': input.model ?? undefined, + 'copilot.run.provider': input.provider ?? undefined, + 'copilot.run.status': input.status ?? 
'active', + }, + async () => { + const [run] = await db + .insert(copilotRuns) + .values({ + ...(input.id ? { id: input.id } : {}), + executionId: input.executionId, + parentRunId: input.parentRunId ?? null, + chatId: input.chatId, + userId: input.userId, + workflowId: input.workflowId ?? null, + workspaceId: input.workspaceId ?? null, + streamId: input.streamId, + agent: input.agent ?? null, + model: input.model ?? null, + provider: input.provider ?? null, + requestContext: input.requestContext ?? {}, + status: input.status ?? 'active', + }) + .returning() + return run + }, + ) } export async function updateRunStatus( @@ -65,19 +124,31 @@ export async function updateRunStatus( requestContext?: Record } = {} ) { - const [run] = await db - .update(copilotRuns) - .set({ - status, - completedAt: updates.completedAt, - error: updates.error, - requestContext: updates.requestContext, - updatedAt: new Date(), - }) - .where(eq(copilotRuns.id, runId)) - .returning() - - return run ?? null + return withDbSpan( + 'copilot.async_runs.update_run_status', + 'UPDATE', + 'copilot_runs', + { + 'copilot.run.id': runId, + 'copilot.run.status': status, + 'copilot.run.has_error': !!updates.error, + 'copilot.run.has_completed_at': !!updates.completedAt, + }, + async () => { + const [run] = await db + .update(copilotRuns) + .set({ + status, + completedAt: updates.completedAt, + error: updates.error, + requestContext: updates.requestContext, + updatedAt: new Date(), + }) + .where(eq(copilotRuns.id, runId)) + .returning() + return run ?? 
null + }, + ) } export async function getLatestRunForExecution(executionId: string) { diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts index 6394988d439..b702b8de3db 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts @@ -5,1416 +5,1849 @@ export type JsonSchema = unknown export const MOTHERSHIP_STREAM_V1_SCHEMA: JsonSchema = { - $defs: { - MothershipStreamV1AdditionalPropertiesMap: { - additionalProperties: true, - type: 'object', - }, - MothershipStreamV1AsyncToolRecordStatus: { - enum: ['pending', 'running', 'completed', 'failed', 'cancelled', 'delivered'], - type: 'string', - }, - MothershipStreamV1CheckpointPauseEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CheckpointPausePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "$defs": { + "MothershipStreamV1AdditionalPropertiesMap": { + "additionalProperties": true, + "type": "object" + }, + "MothershipStreamV1AsyncToolRecordStatus": { + "enum": [ + "pending", + "running", + "completed", + "failed", + "cancelled", + "delivered" + ], + "type": "string" + }, + "MothershipStreamV1CheckpointPauseEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CheckpointPausePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + 
}, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CheckpointPauseFrame: { - additionalProperties: false, - properties: { - parentToolCallId: { - type: 'string', - }, - parentToolName: { - type: 'string', - }, - pendingToolIds: { - items: { - type: 'string', + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CheckpointPauseFrame": { + "additionalProperties": false, + "properties": { + "parentToolCallId": { + "type": "string" + }, + "parentToolName": { + "type": "string" + }, + "pendingToolIds": { + "items": { + "type": "string" }, - type: 'array', - }, + "type": "array" + } }, - required: ['parentToolCallId', 'parentToolName', 'pendingToolIds'], - type: 'object', - }, - MothershipStreamV1CheckpointPausePayload: { - additionalProperties: false, - properties: { - checkpointId: { - type: 'string', - }, - executionId: { - type: 'string', - }, - frames: { - items: { - $ref: '#/$defs/MothershipStreamV1CheckpointPauseFrame', + "required": [ + "parentToolCallId", + "parentToolName", + "pendingToolIds" + ], + "type": "object" + }, + "MothershipStreamV1CheckpointPausePayload": { + "additionalProperties": false, + "properties": { + "checkpointId": { + "type": "string" + }, + "executionId": { + "type": "string" + }, + "frames": { + "items": { + "$ref": "#/$defs/MothershipStreamV1CheckpointPauseFrame" }, - type: 'array', + "type": "array" }, - kind: { - enum: ['checkpoint_pause'], - type: 'string', + "kind": { + "enum": [ + "checkpoint_pause" + ], + "type": "string" }, - pendingToolCallIds: { - items: { - type: 'string', + "pendingToolCallIds": { + "items": { + "type": "string" }, - type: 'array', - }, - runId: { - type: 'string', + "type": "array" }, + "runId": { + "type": "string" + } }, - required: 
['kind', 'checkpointId', 'runId', 'executionId', 'pendingToolCallIds'], - type: 'object', - }, - MothershipStreamV1CompactionDoneData: { - additionalProperties: false, - properties: { - summary_chars: { - type: 'integer', - }, + "required": [ + "kind", + "checkpointId", + "runId", + "executionId", + "pendingToolCallIds" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDoneData": { + "additionalProperties": false, + "properties": { + "summary_chars": { + "type": "integer" + } }, - required: ['summary_chars'], - type: 'object', - }, - MothershipStreamV1CompactionDoneEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompactionDonePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "summary_chars" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDoneEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompactionDonePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompactionDonePayload: { - additionalProperties: false, - properties: { - data: { - $ref: '#/$defs/MothershipStreamV1CompactionDoneData', - }, - kind: { - enum: ['compaction_done'], - type: 
'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDonePayload": { + "additionalProperties": false, + "properties": { + "data": { + "$ref": "#/$defs/MothershipStreamV1CompactionDoneData" + }, + "kind": { + "enum": [ + "compaction_done" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1CompactionStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompactionStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1CompactionStartEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompactionStartPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompactionStartPayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['compaction_start'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompactionStartPayload": { + "additionalProperties": false, + "properties": { 
+ "kind": { + "enum": [ + "compaction_start" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1CompleteEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompletePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['complete'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1CompleteEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompletePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "complete" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompletePayload: { - additionalProperties: false, - properties: { - cost: { - $ref: '#/$defs/MothershipStreamV1CostData', - }, - reason: { - type: 'string', - }, - response: true, - status: { - $ref: '#/$defs/MothershipStreamV1CompletionStatus', - }, - usage: { - $ref: '#/$defs/MothershipStreamV1UsageData', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompletePayload": { + "additionalProperties": false, + "properties": { + "cost": { + "$ref": "#/$defs/MothershipStreamV1CostData" + }, + "reason": { + "type": "string" + }, + "response": true, + "status": { 
+ "$ref": "#/$defs/MothershipStreamV1CompletionStatus" + }, + "usage": { + "$ref": "#/$defs/MothershipStreamV1UsageData" + } }, - required: ['status'], - type: 'object', - }, - MothershipStreamV1CompletionStatus: { - enum: ['complete', 'error', 'cancelled'], - type: 'string', - }, - MothershipStreamV1CostData: { - additionalProperties: false, - properties: { - input: { - type: 'number', - }, - output: { - type: 'number', - }, - total: { - type: 'number', - }, + "required": [ + "status" + ], + "type": "object" + }, + "MothershipStreamV1CompletionStatus": { + "enum": [ + "complete", + "error", + "cancelled" + ], + "type": "string" + }, + "MothershipStreamV1CostData": { + "additionalProperties": false, + "properties": { + "input": { + "type": "number" + }, + "output": { + "type": "number" + }, + "total": { + "type": "number" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1ErrorEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ErrorPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', + "MothershipStreamV1ErrorEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ErrorPayload" }, - seq: { - type: 'integer', + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', + "seq": { + "type": "integer" }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" }, - ts: { - type: 'string', + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" }, - type: { - enum: ['error'], - type: 'string', + "ts": { + "type": "string" }, - v: { - enum: [1], - type: 'integer', + "type": { + "enum": [ + "error" + ], + "type": "string" }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - 
MothershipStreamV1ErrorPayload: { - additionalProperties: false, - properties: { - code: { - type: 'string', - }, - data: true, - displayMessage: { - type: 'string', - }, - error: { - type: 'string', - }, - message: { - type: 'string', - }, - provider: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ErrorPayload": { + "additionalProperties": false, + "properties": { + "code": { + "type": "string" + }, + "data": true, + "displayMessage": { + "type": "string" + }, + "error": { + "type": "string" + }, + "message": { + "type": "string" + }, + "provider": { + "type": "string" + } }, - required: ['message'], - type: 'object', - }, - MothershipStreamV1EventEnvelopeCommon: { - additionalProperties: false, - properties: { - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "message" + ], + "type": "object" + }, + "MothershipStreamV1EventEnvelopeCommon": { + "additionalProperties": false, + "properties": { + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream'], - type: 'object', - }, - MothershipStreamV1EventType: { - enum: ['session', 'text', 'tool', 'span', 'resource', 'run', 'error', 'complete'], - type: 'string', - }, - MothershipStreamV1ResourceDescriptor: { - additionalProperties: false, - properties: { - id: { - type: 'string', - }, - title: { - type: 'string', - }, - type: { - type: 'string', - }, + 
"required": [ + "v", + "seq", + "ts", + "stream" + ], + "type": "object" + }, + "MothershipStreamV1EventType": { + "enum": [ + "session", + "text", + "tool", + "span", + "resource", + "run", + "error", + "complete" + ], + "type": "string" + }, + "MothershipStreamV1ResourceDescriptor": { + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + } }, - required: ['type', 'id'], - type: 'object', - }, - MothershipStreamV1ResourceOp: { - enum: ['upsert', 'remove'], - type: 'string', - }, - MothershipStreamV1ResourceRemoveEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ResourceRemovePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['resource'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "type", + "id" + ], + "type": "object" + }, + "MothershipStreamV1ResourceOp": { + "enum": [ + "upsert", + "remove" + ], + "type": "string" + }, + "MothershipStreamV1ResourceRemoveEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ResourceRemovePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "resource" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ResourceRemovePayload: { - 
additionalProperties: false, - properties: { - op: { - enum: ['remove'], - type: 'string', - }, - resource: { - $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ResourceRemovePayload": { + "additionalProperties": false, + "properties": { + "op": { + "enum": [ + "remove" + ], + "type": "string" + }, + "resource": { + "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" + } }, - required: ['op', 'resource'], - type: 'object', - }, - MothershipStreamV1ResourceUpsertEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ResourceUpsertPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['resource'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "op", + "resource" + ], + "type": "object" + }, + "MothershipStreamV1ResourceUpsertEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ResourceUpsertPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "resource" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ResourceUpsertPayload: { - additionalProperties: false, - properties: { - op: { - enum: ['upsert'], - type: 'string', - }, - resource: { - $ref: 
'#/$defs/MothershipStreamV1ResourceDescriptor', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ResourceUpsertPayload": { + "additionalProperties": false, + "properties": { + "op": { + "enum": [ + "upsert" + ], + "type": "string" + }, + "resource": { + "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" + } }, - required: ['op', 'resource'], - type: 'object', - }, - MothershipStreamV1ResumeRequest: { - additionalProperties: false, - properties: { - checkpointId: { - type: 'string', - }, - results: { - items: { - $ref: '#/$defs/MothershipStreamV1ResumeToolResult', + "required": [ + "op", + "resource" + ], + "type": "object" + }, + "MothershipStreamV1ResumeRequest": { + "additionalProperties": false, + "properties": { + "checkpointId": { + "type": "string" + }, + "results": { + "items": { + "$ref": "#/$defs/MothershipStreamV1ResumeToolResult" }, - type: 'array', - }, - streamId: { - type: 'string', + "type": "array" }, + "streamId": { + "type": "string" + } }, - required: ['streamId', 'checkpointId', 'results'], - type: 'object', - }, - MothershipStreamV1ResumeToolResult: { - additionalProperties: false, - properties: { - error: { - type: 'string', - }, - output: true, - success: { - type: 'boolean', - }, - toolCallId: { - type: 'string', - }, + "required": [ + "streamId", + "checkpointId", + "results" + ], + "type": "object" + }, + "MothershipStreamV1ResumeToolResult": { + "additionalProperties": false, + "properties": { + "error": { + "type": "string" + }, + "output": true, + "success": { + "type": "boolean" + }, + "toolCallId": { + "type": "string" + } }, - required: ['toolCallId', 'success'], - type: 'object', - }, - MothershipStreamV1RunKind: { - enum: ['checkpoint_pause', 'resumed', 'compaction_start', 'compaction_done'], - type: 'string', - }, - MothershipStreamV1RunResumedEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: 
'#/$defs/MothershipStreamV1RunResumedPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "toolCallId", + "success" + ], + "type": "object" + }, + "MothershipStreamV1RunKind": { + "enum": [ + "checkpoint_pause", + "resumed", + "compaction_start", + "compaction_done" + ], + "type": "string" + }, + "MothershipStreamV1RunResumedEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1RunResumedPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1RunResumedPayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['resumed'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1RunResumedPayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "resumed" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SessionChatEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionChatPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - 
$ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SessionChatEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionChatPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionChatPayload: { - additionalProperties: false, - properties: { - chatId: { - type: 'string', - }, - kind: { - enum: ['chat'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionChatPayload": { + "additionalProperties": false, + "properties": { + "chatId": { + "type": "string" + }, + "kind": { + "enum": [ + "chat" + ], + "type": "string" + } }, - required: ['kind', 'chatId'], - type: 'object', - }, - MothershipStreamV1SessionKind: { - enum: ['trace', 'chat', 'title', 'start'], - type: 'string', - }, - MothershipStreamV1SessionStartData: { - additionalProperties: false, - properties: { - responseId: { - type: 'string', - }, + "required": [ + "kind", + "chatId" + ], + "type": "object" + }, + "MothershipStreamV1SessionKind": { + "enum": [ + "trace", + "chat", + "title", + "start" + ], + "type": "string" + }, + "MothershipStreamV1SessionStartData": { + "additionalProperties": false, + "properties": { + "responseId": { 
+ "type": "string" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1SessionStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', + "MothershipStreamV1SessionStartEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionStartPayload" }, - seq: { - type: 'integer', + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', + "seq": { + "type": "integer" }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" }, - ts: { - type: 'string', + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" }, - type: { - enum: ['session'], - type: 'string', + "ts": { + "type": "string" }, - v: { - enum: [1], - type: 'integer', + "type": { + "enum": [ + "session" + ], + "type": "string" }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionStartPayload: { - additionalProperties: false, - properties: { - data: { - $ref: '#/$defs/MothershipStreamV1SessionStartData', - }, - kind: { - enum: ['start'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionStartPayload": { + "additionalProperties": false, + "properties": { + "data": { + "$ref": "#/$defs/MothershipStreamV1SessionStartData" + }, + "kind": { + "enum": [ + "start" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SessionTitleEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionTitlePayload', - }, - scope: { - $ref: 
'#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SessionTitleEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionTitlePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionTitlePayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['title'], - type: 'string', - }, - title: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionTitlePayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "title" + ], + "type": "string" + }, + "title": { + "type": "string" + } }, - required: ['kind', 'title'], - type: 'object', - }, - MothershipStreamV1SessionTraceEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionTracePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: 
{ - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "title" + ], + "type": "object" + }, + "MothershipStreamV1SessionTraceEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionTracePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionTracePayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['trace'], - type: 'string', - }, - requestId: { - type: 'string', - }, - spanId: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionTracePayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "trace" + ], + "type": "string" + }, + "requestId": { + "type": "string" + }, + "spanId": { + "type": "string" + } }, - required: ['kind', 'requestId'], - type: 'object', - }, - MothershipStreamV1SpanKind: { - enum: ['subagent'], - type: 'string', - }, - MothershipStreamV1SpanLifecycleEvent: { - enum: ['start', 'end'], - type: 'string', - }, - MothershipStreamV1SpanPayloadKind: { - enum: ['subagent', 'structured_result', 'subagent_result'], - type: 'string', - }, - MothershipStreamV1StreamCursor: { - additionalProperties: false, - properties: { - cursor: { - type: 'string', - }, - seq: { - type: 'integer', - }, - streamId: { - type: 'string', - }, + "required": [ + "kind", + "requestId" + ], + "type": "object" + }, + 
"MothershipStreamV1SpanKind": { + "enum": [ + "subagent" + ], + "type": "string" + }, + "MothershipStreamV1SpanLifecycleEvent": { + "enum": [ + "start", + "end" + ], + "type": "string" + }, + "MothershipStreamV1SpanPayloadKind": { + "enum": [ + "subagent", + "structured_result", + "subagent_result" + ], + "type": "string" + }, + "MothershipStreamV1StreamCursor": { + "additionalProperties": false, + "properties": { + "cursor": { + "type": "string" + }, + "seq": { + "type": "integer" + }, + "streamId": { + "type": "string" + } }, - required: ['streamId', 'cursor', 'seq'], - type: 'object', - }, - MothershipStreamV1StreamRef: { - additionalProperties: false, - properties: { - chatId: { - type: 'string', - }, - cursor: { - type: 'string', - }, - streamId: { - type: 'string', - }, + "required": [ + "streamId", + "cursor", + "seq" + ], + "type": "object" + }, + "MothershipStreamV1StreamRef": { + "additionalProperties": false, + "properties": { + "chatId": { + "type": "string" + }, + "cursor": { + "type": "string" + }, + "streamId": { + "type": "string" + } }, - required: ['streamId'], - type: 'object', - }, - MothershipStreamV1StreamScope: { - additionalProperties: false, - properties: { - agentId: { - type: 'string', - }, - lane: { - enum: ['subagent'], - type: 'string', - }, - parentToolCallId: { - type: 'string', - }, + "required": [ + "streamId" + ], + "type": "object" + }, + "MothershipStreamV1StreamScope": { + "additionalProperties": false, + "properties": { + "agentId": { + "type": "string" + }, + "lane": { + "enum": [ + "subagent" + ], + "type": "string" + }, + "parentToolCallId": { + "type": "string" + } }, - required: ['lane'], - type: 'object', - }, - MothershipStreamV1StructuredResultSpanEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1StructuredResultSpanPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: 
'#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "lane" + ], + "type": "object" + }, + "MothershipStreamV1StructuredResultSpanEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1StructuredResultSpanPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - kind: { - enum: ['structured_result'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1StructuredResultSpanPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "kind": { + "enum": [ + "structured_result" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SubagentResultSpanEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentResultSpanPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: 
['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SubagentResultSpanEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SubagentResultSpanPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - kind: { - enum: ['subagent_result'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentResultSpanPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "kind": { + "enum": [ + "subagent_result" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SubagentSpanEndEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentSpanEndPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + 
"MothershipStreamV1SubagentSpanEndEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SubagentSpanEndPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - event: { - enum: ['end'], - type: 'string', - }, - kind: { - enum: ['subagent'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanEndPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "event": { + "enum": [ + "end" + ], + "type": "string" + }, + "kind": { + "enum": [ + "subagent" + ], + "type": "string" + } }, - required: ['kind', 'event'], - type: 'object', - }, - MothershipStreamV1SubagentSpanStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentSpanStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "event" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanStartEventEnvelope": { + 
"additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SubagentSpanStartPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - event: { - enum: ['start'], - type: 'string', - }, - kind: { - enum: ['subagent'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanStartPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "event": { + "enum": [ + "start" + ], + "type": "string" + }, + "kind": { + "enum": [ + "subagent" + ], + "type": "string" + } }, - required: ['kind', 'event'], - type: 'object', - }, - MothershipStreamV1TextChannel: { - enum: ['assistant', 'thinking'], - type: 'string', - }, - MothershipStreamV1TextEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1TextPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['text'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "event" + ], + "type": "object" + }, + "MothershipStreamV1TextChannel": { + 
"enum": [ + "assistant", + "thinking" + ], + "type": "string" + }, + "MothershipStreamV1TextEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1TextPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "text" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1TextPayload: { - additionalProperties: false, - properties: { - channel: { - $ref: '#/$defs/MothershipStreamV1TextChannel', - }, - text: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1TextPayload": { + "additionalProperties": false, + "properties": { + "channel": { + "$ref": "#/$defs/MothershipStreamV1TextChannel" + }, + "text": { + "type": "string" + } }, - required: ['channel', 'text'], - type: 'object', - }, - MothershipStreamV1ToolArgsDeltaEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "channel", + "text" + ], + "type": "object" + }, + "MothershipStreamV1ToolArgsDeltaEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": 
"#/$defs/MothershipStreamV1ToolArgsDeltaPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolArgsDeltaPayload: { - additionalProperties: false, - properties: { - argumentsDelta: { - type: 'string', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - phase: { - enum: ['args_delta'], - type: 'string', - }, - toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolArgsDeltaPayload": { + "additionalProperties": false, + "properties": { + "argumentsDelta": { + "type": "string" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "phase": { + "enum": [ + "args_delta" + ], + "type": "string" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + } }, - required: ['toolCallId', 'toolName', 'argumentsDelta', 'executor', 'mode', 'phase'], - type: 'object', - }, - MothershipStreamV1ToolCallDescriptor: { - additionalProperties: false, - properties: { - arguments: { - $ref: '#/$defs/MothershipStreamV1AdditionalPropertiesMap', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - partial: { - type: 'boolean', - }, - phase: { - enum: ['call'], - type: 'string', - }, - requiresConfirmation: { - type: 
'boolean', - }, - status: { - $ref: '#/$defs/MothershipStreamV1ToolStatus', - }, - toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, - ui: { - $ref: '#/$defs/MothershipStreamV1ToolUI', - }, + "required": [ + "toolCallId", + "toolName", + "argumentsDelta", + "executor", + "mode", + "phase" + ], + "type": "object" + }, + "MothershipStreamV1ToolCallDescriptor": { + "additionalProperties": false, + "properties": { + "arguments": { + "$ref": "#/$defs/MothershipStreamV1AdditionalPropertiesMap" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "partial": { + "type": "boolean" + }, + "phase": { + "enum": [ + "call" + ], + "type": "string" + }, + "requiresConfirmation": { + "type": "boolean" + }, + "status": { + "$ref": "#/$defs/MothershipStreamV1ToolStatus" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "ui": { + "$ref": "#/$defs/MothershipStreamV1ToolUI" + } }, - required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase'], - type: 'object', - }, - MothershipStreamV1ToolCallEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolCallDescriptor', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "toolCallId", + "toolName", + "executor", + "mode", + "phase" + ], + "type": "object" + }, + "MothershipStreamV1ToolCallEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ToolCallDescriptor" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + 
"type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolExecutor: { - enum: ['go', 'sim', 'client'], - type: 'string', - }, - MothershipStreamV1ToolMode: { - enum: ['sync', 'async'], - type: 'string', - }, - MothershipStreamV1ToolOutcome: { - enum: ['success', 'error', 'cancelled', 'skipped', 'rejected'], - type: 'string', - }, - MothershipStreamV1ToolPhase: { - enum: ['call', 'args_delta', 'result'], - type: 'string', - }, - MothershipStreamV1ToolResultEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolResultPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolExecutor": { + "enum": [ + "go", + "sim", + "client" + ], + "type": "string" + }, + "MothershipStreamV1ToolMode": { + "enum": [ + "sync", + "async" + ], + "type": "string" + }, + "MothershipStreamV1ToolOutcome": { + "enum": [ + "success", + "error", + "cancelled", + "skipped", + "rejected" + ], + "type": "string" + }, + "MothershipStreamV1ToolPhase": { + "enum": [ + "call", + "args_delta", + "result" + ], + "type": "string" + }, + "MothershipStreamV1ToolResultEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": 
"#/$defs/MothershipStreamV1ToolResultPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolResultPayload: { - additionalProperties: false, - properties: { - error: { - type: 'string', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - output: true, - phase: { - enum: ['result'], - type: 'string', - }, - status: { - $ref: '#/$defs/MothershipStreamV1ToolStatus', - }, - success: { - type: 'boolean', - }, - toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolResultPayload": { + "additionalProperties": false, + "properties": { + "error": { + "type": "string" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "output": true, + "phase": { + "enum": [ + "result" + ], + "type": "string" + }, + "status": { + "$ref": "#/$defs/MothershipStreamV1ToolStatus" + }, + "success": { + "type": "boolean" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + } }, - required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase', 'success'], - type: 'object', - }, - MothershipStreamV1ToolStatus: { - enum: ['generating', 'executing', 'success', 'error', 'cancelled', 'skipped', 'rejected'], - type: 'string', - }, - MothershipStreamV1ToolUI: { - additionalProperties: false, - properties: { - 
clientExecutable: { - type: 'boolean', - }, - hidden: { - type: 'boolean', - }, - icon: { - type: 'string', - }, - internal: { - type: 'boolean', - }, - phaseLabel: { - type: 'string', - }, - requiresConfirmation: { - type: 'boolean', - }, - title: { - type: 'string', - }, + "required": [ + "toolCallId", + "toolName", + "executor", + "mode", + "phase", + "success" + ], + "type": "object" + }, + "MothershipStreamV1ToolStatus": { + "enum": [ + "generating", + "executing", + "success", + "error", + "cancelled", + "skipped", + "rejected" + ], + "type": "string" + }, + "MothershipStreamV1ToolUI": { + "additionalProperties": false, + "properties": { + "clientExecutable": { + "type": "boolean" + }, + "hidden": { + "type": "boolean" + }, + "icon": { + "type": "string" + }, + "internal": { + "type": "boolean" + }, + "phaseLabel": { + "type": "string" + }, + "requiresConfirmation": { + "type": "boolean" + }, + "title": { + "type": "string" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1Trace: { - additionalProperties: false, - properties: { - requestId: { - type: 'string', + "MothershipStreamV1Trace": { + "additionalProperties": false, + "properties": { + "goTraceId": { + "description": "OTel trace ID from the first Go ingress. 
May differ from requestId when Sim assigns the canonical request identity.", + "type": "string" }, - spanId: { - type: 'string', + "requestId": { + "type": "string" }, + "spanId": { + "type": "string" + } }, - required: ['requestId'], - type: 'object', + "required": [ + "requestId" + ], + "type": "object" }, - MothershipStreamV1UsageData: { - additionalProperties: false, - properties: { - cache_creation_input_tokens: { - type: 'integer', - }, - cache_read_input_tokens: { - type: 'integer', + "MothershipStreamV1UsageData": { + "additionalProperties": false, + "properties": { + "cache_creation_input_tokens": { + "type": "integer" }, - input_tokens: { - type: 'integer', + "cache_read_input_tokens": { + "type": "integer" }, - model: { - type: 'string', + "input_tokens": { + "type": "integer" }, - output_tokens: { - type: 'integer', + "model": { + "type": "string" }, - total_tokens: { - type: 'integer', + "output_tokens": { + "type": "integer" }, + "total_tokens": { + "type": "integer" + } }, - type: 'object', - }, + "type": "object" + } }, - $id: 'mothership-stream-v1.schema.json', - $schema: 'https://json-schema.org/draft/2020-12/schema', - description: 'Shared execution-oriented mothership stream contract from Go to Sim.', - oneOf: [ + "$id": "mothership-stream-v1.schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Shared execution-oriented mothership stream contract from Go to Sim.", + "oneOf": [ { - $ref: '#/$defs/MothershipStreamV1SessionStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionChatEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionChatEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionTitleEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionTitleEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionTraceEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionTraceEventEnvelope" }, { - $ref: 
'#/$defs/MothershipStreamV1TextEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1TextEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolCallEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolCallEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolResultEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolResultEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1RunResumedEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1RunResumedEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CompactionStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1CompactionStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CompactionDoneEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1CompactionDoneEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ErrorEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ErrorEventEnvelope" }, { - 
$ref: '#/$defs/MothershipStreamV1CompleteEventEnvelope', - }, + "$ref": "#/$defs/MothershipStreamV1CompleteEventEnvelope" + } ], - title: 'MothershipStreamV1EventEnvelope', + "title": "MothershipStreamV1EventEnvelope" } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts index 24841264e6c..95d86b3823d 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts @@ -24,530 +24,512 @@ export type MothershipStreamV1EventEnvelope = | MothershipStreamV1CompactionStartEventEnvelope | MothershipStreamV1CompactionDoneEventEnvelope | MothershipStreamV1ErrorEventEnvelope - | MothershipStreamV1CompleteEventEnvelope -export type MothershipStreamV1TextChannel = 'assistant' | 'thinking' -export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client' -export type MothershipStreamV1ToolMode = 'sync' | 'async' + | MothershipStreamV1CompleteEventEnvelope; +export type MothershipStreamV1TextChannel = "assistant" | "thinking"; +export type MothershipStreamV1ToolExecutor = "go" | "sim" | "client"; +export type MothershipStreamV1ToolMode = "sync" | "async"; export type MothershipStreamV1ToolStatus = - | 'generating' - | 'executing' - | 'success' - | 'error' - | 'cancelled' - | 'skipped' - | 'rejected' -export type MothershipStreamV1CompletionStatus = 'complete' | 'error' | 'cancelled' + | "generating" + | "executing" + | "success" + | "error" + | "cancelled" + | "skipped" + | "rejected"; +export type MothershipStreamV1CompletionStatus = "complete" | "error" | "cancelled"; export interface MothershipStreamV1SessionStartEventEnvelope { - payload: MothershipStreamV1SessionStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: 
number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionStartPayload { - data?: MothershipStreamV1SessionStartData - kind: 'start' + data?: MothershipStreamV1SessionStartData; + kind: "start"; } export interface MothershipStreamV1SessionStartData { - responseId?: string + responseId?: string; } export interface MothershipStreamV1StreamScope { - agentId?: string - lane: 'subagent' - parentToolCallId?: string + agentId?: string; + lane: "subagent"; + parentToolCallId?: string; } export interface MothershipStreamV1StreamRef { - chatId?: string - cursor?: string - streamId: string + chatId?: string; + cursor?: string; + streamId: string; } export interface MothershipStreamV1Trace { - requestId: string - spanId?: string + /** + * OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity. + */ + goTraceId?: string; + requestId: string; + spanId?: string; } export interface MothershipStreamV1SessionChatEventEnvelope { - payload: MothershipStreamV1SessionChatPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionChatPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionChatPayload { - chatId: string - kind: 'chat' + chatId: string; + kind: "chat"; } export interface MothershipStreamV1SessionTitleEventEnvelope { - payload: MothershipStreamV1SessionTitlePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionTitlePayload; + scope?: 
MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionTitlePayload { - kind: 'title' - title: string + kind: "title"; + title: string; } export interface MothershipStreamV1SessionTraceEventEnvelope { - payload: MothershipStreamV1SessionTracePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionTracePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionTracePayload { - kind: 'trace' - requestId: string - spanId?: string + kind: "trace"; + requestId: string; + spanId?: string; } export interface MothershipStreamV1TextEventEnvelope { - payload: MothershipStreamV1TextPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'text' - v: 1 + payload: MothershipStreamV1TextPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "text"; + v: 1; } export interface MothershipStreamV1TextPayload { - channel: MothershipStreamV1TextChannel - text: string + channel: MothershipStreamV1TextChannel; + text: string; } export interface MothershipStreamV1ToolCallEventEnvelope { - payload: MothershipStreamV1ToolCallDescriptor - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolCallDescriptor; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: 
MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolCallDescriptor { - arguments?: MothershipStreamV1AdditionalPropertiesMap - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - partial?: boolean - phase: 'call' - requiresConfirmation?: boolean - status?: MothershipStreamV1ToolStatus - toolCallId: string - toolName: string - ui?: MothershipStreamV1ToolUI + arguments?: MothershipStreamV1AdditionalPropertiesMap; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + partial?: boolean; + phase: "call"; + requiresConfirmation?: boolean; + status?: MothershipStreamV1ToolStatus; + toolCallId: string; + toolName: string; + ui?: MothershipStreamV1ToolUI; } export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown + [k: string]: unknown; } export interface MothershipStreamV1ToolUI { - clientExecutable?: boolean - hidden?: boolean - icon?: string - internal?: boolean - phaseLabel?: string - requiresConfirmation?: boolean - title?: string + clientExecutable?: boolean; + hidden?: boolean; + icon?: string; + internal?: boolean; + phaseLabel?: string; + requiresConfirmation?: boolean; + title?: string; } export interface MothershipStreamV1ToolArgsDeltaEventEnvelope { - payload: MothershipStreamV1ToolArgsDeltaPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolArgsDeltaPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolArgsDeltaPayload { - argumentsDelta: string - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - phase: 'args_delta' - toolCallId: string - toolName: string + 
argumentsDelta: string; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + phase: "args_delta"; + toolCallId: string; + toolName: string; } export interface MothershipStreamV1ToolResultEventEnvelope { - payload: MothershipStreamV1ToolResultPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolResultPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolResultPayload { - error?: string - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - output?: unknown - phase: 'result' - status?: MothershipStreamV1ToolStatus - success: boolean - toolCallId: string - toolName: string + error?: string; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + output?: unknown; + phase: "result"; + status?: MothershipStreamV1ToolStatus; + success: boolean; + toolCallId: string; + toolName: string; } export interface MothershipStreamV1SubagentSpanStartEventEnvelope { - payload: MothershipStreamV1SubagentSpanStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentSpanStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentSpanStartPayload { - agent?: string - data?: unknown - event: 'start' - kind: 'subagent' + agent?: string; + data?: unknown; + event: "start"; + kind: "subagent"; } export interface MothershipStreamV1SubagentSpanEndEventEnvelope { - payload: 
MothershipStreamV1SubagentSpanEndPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentSpanEndPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentSpanEndPayload { - agent?: string - data?: unknown - event: 'end' - kind: 'subagent' + agent?: string; + data?: unknown; + event: "end"; + kind: "subagent"; } export interface MothershipStreamV1StructuredResultSpanEventEnvelope { - payload: MothershipStreamV1StructuredResultSpanPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1StructuredResultSpanPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1StructuredResultSpanPayload { - agent?: string - data?: unknown - kind: 'structured_result' + agent?: string; + data?: unknown; + kind: "structured_result"; } export interface MothershipStreamV1SubagentResultSpanEventEnvelope { - payload: MothershipStreamV1SubagentResultSpanPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentResultSpanPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentResultSpanPayload { - agent?: string - data?: unknown - kind: 'subagent_result' + agent?: string; + data?: 
unknown; + kind: "subagent_result"; } export interface MothershipStreamV1ResourceUpsertEventEnvelope { - payload: MothershipStreamV1ResourceUpsertPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'resource' - v: 1 + payload: MothershipStreamV1ResourceUpsertPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "resource"; + v: 1; } export interface MothershipStreamV1ResourceUpsertPayload { - op: 'upsert' - resource: MothershipStreamV1ResourceDescriptor + op: "upsert"; + resource: MothershipStreamV1ResourceDescriptor; } export interface MothershipStreamV1ResourceDescriptor { - id: string - title?: string - type: string + id: string; + title?: string; + type: string; } export interface MothershipStreamV1ResourceRemoveEventEnvelope { - payload: MothershipStreamV1ResourceRemovePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'resource' - v: 1 + payload: MothershipStreamV1ResourceRemovePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "resource"; + v: 1; } export interface MothershipStreamV1ResourceRemovePayload { - op: 'remove' - resource: MothershipStreamV1ResourceDescriptor + op: "remove"; + resource: MothershipStreamV1ResourceDescriptor; } export interface MothershipStreamV1CheckpointPauseEventEnvelope { - payload: MothershipStreamV1CheckpointPausePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CheckpointPausePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: 
MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CheckpointPausePayload { - checkpointId: string - executionId: string - frames?: MothershipStreamV1CheckpointPauseFrame[] - kind: 'checkpoint_pause' - pendingToolCallIds: string[] - runId: string + checkpointId: string; + executionId: string; + frames?: MothershipStreamV1CheckpointPauseFrame[]; + kind: "checkpoint_pause"; + pendingToolCallIds: string[]; + runId: string; } export interface MothershipStreamV1CheckpointPauseFrame { - parentToolCallId: string - parentToolName: string - pendingToolIds: string[] + parentToolCallId: string; + parentToolName: string; + pendingToolIds: string[]; } export interface MothershipStreamV1RunResumedEventEnvelope { - payload: MothershipStreamV1RunResumedPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1RunResumedPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1RunResumedPayload { - kind: 'resumed' + kind: "resumed"; } export interface MothershipStreamV1CompactionStartEventEnvelope { - payload: MothershipStreamV1CompactionStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CompactionStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CompactionStartPayload { - kind: 'compaction_start' + kind: "compaction_start"; } export interface MothershipStreamV1CompactionDoneEventEnvelope { - 
payload: MothershipStreamV1CompactionDonePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CompactionDonePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CompactionDonePayload { - data?: MothershipStreamV1CompactionDoneData - kind: 'compaction_done' + data?: MothershipStreamV1CompactionDoneData; + kind: "compaction_done"; } export interface MothershipStreamV1CompactionDoneData { - summary_chars: number + summary_chars: number; } export interface MothershipStreamV1ErrorEventEnvelope { - payload: MothershipStreamV1ErrorPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'error' - v: 1 + payload: MothershipStreamV1ErrorPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "error"; + v: 1; } export interface MothershipStreamV1ErrorPayload { - code?: string - data?: unknown - displayMessage?: string - error?: string - message: string - provider?: string + code?: string; + data?: unknown; + displayMessage?: string; + error?: string; + message: string; + provider?: string; } export interface MothershipStreamV1CompleteEventEnvelope { - payload: MothershipStreamV1CompletePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'complete' - v: 1 + payload: MothershipStreamV1CompletePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "complete"; + v: 1; } export 
interface MothershipStreamV1CompletePayload { - cost?: MothershipStreamV1CostData - reason?: string - response?: unknown - status: MothershipStreamV1CompletionStatus - usage?: MothershipStreamV1UsageData + cost?: MothershipStreamV1CostData; + reason?: string; + response?: unknown; + status: MothershipStreamV1CompletionStatus; + usage?: MothershipStreamV1UsageData; } export interface MothershipStreamV1CostData { - input?: number - output?: number - total?: number + input?: number; + output?: number; + total?: number; } export interface MothershipStreamV1UsageData { - cache_creation_input_tokens?: number - cache_read_input_tokens?: number - input_tokens?: number - model?: string - output_tokens?: number - total_tokens?: number + cache_creation_input_tokens?: number; + cache_read_input_tokens?: number; + input_tokens?: number; + model?: string; + output_tokens?: number; + total_tokens?: number; } -export type MothershipStreamV1AsyncToolRecordStatus = - | 'pending' - | 'running' - | 'completed' - | 'failed' - | 'cancelled' - | 'delivered' +export type MothershipStreamV1AsyncToolRecordStatus = "pending" | "running" | "completed" | "failed" | "cancelled" | "delivered" export const MothershipStreamV1AsyncToolRecordStatus = { - pending: 'pending', - running: 'running', - completed: 'completed', - failed: 'failed', - cancelled: 'cancelled', - delivered: 'delivered', -} as const + "pending": "pending", + "running": "running", + "completed": "completed", + "failed": "failed", + "cancelled": "cancelled", + "delivered": "delivered", +} as const; export const MothershipStreamV1CompletionStatus = { - complete: 'complete', - error: 'error', - cancelled: 'cancelled', -} as const + "complete": "complete", + "error": "error", + "cancelled": "cancelled", +} as const; -export type MothershipStreamV1EventType = - | 'session' - | 'text' - | 'tool' - | 'span' - | 'resource' - | 'run' - | 'error' - | 'complete' +export type MothershipStreamV1EventType = "session" | "text" | "tool" | "span" 
| "resource" | "run" | "error" | "complete" export const MothershipStreamV1EventType = { - session: 'session', - text: 'text', - tool: 'tool', - span: 'span', - resource: 'resource', - run: 'run', - error: 'error', - complete: 'complete', -} as const + "session": "session", + "text": "text", + "tool": "tool", + "span": "span", + "resource": "resource", + "run": "run", + "error": "error", + "complete": "complete", +} as const; -export type MothershipStreamV1ResourceOp = 'upsert' | 'remove' +export type MothershipStreamV1ResourceOp = "upsert" | "remove" export const MothershipStreamV1ResourceOp = { - upsert: 'upsert', - remove: 'remove', -} as const + "upsert": "upsert", + "remove": "remove", +} as const; -export type MothershipStreamV1RunKind = - | 'checkpoint_pause' - | 'resumed' - | 'compaction_start' - | 'compaction_done' +export type MothershipStreamV1RunKind = "checkpoint_pause" | "resumed" | "compaction_start" | "compaction_done" export const MothershipStreamV1RunKind = { - checkpoint_pause: 'checkpoint_pause', - resumed: 'resumed', - compaction_start: 'compaction_start', - compaction_done: 'compaction_done', -} as const + "checkpoint_pause": "checkpoint_pause", + "resumed": "resumed", + "compaction_start": "compaction_start", + "compaction_done": "compaction_done", +} as const; -export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start' +export type MothershipStreamV1SessionKind = "trace" | "chat" | "title" | "start" export const MothershipStreamV1SessionKind = { - trace: 'trace', - chat: 'chat', - title: 'title', - start: 'start', -} as const + "trace": "trace", + "chat": "chat", + "title": "title", + "start": "start", +} as const; -export type MothershipStreamV1SpanKind = 'subagent' +export type MothershipStreamV1SpanKind = "subagent" export const MothershipStreamV1SpanKind = { - subagent: 'subagent', -} as const + "subagent": "subagent", +} as const; -export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end' +export type 
MothershipStreamV1SpanLifecycleEvent = "start" | "end" export const MothershipStreamV1SpanLifecycleEvent = { - start: 'start', - end: 'end', -} as const + "start": "start", + "end": "end", +} as const; -export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result' +export type MothershipStreamV1SpanPayloadKind = "subagent" | "structured_result" | "subagent_result" export const MothershipStreamV1SpanPayloadKind = { - subagent: 'subagent', - structured_result: 'structured_result', - subagent_result: 'subagent_result', -} as const + "subagent": "subagent", + "structured_result": "structured_result", + "subagent_result": "subagent_result", +} as const; export const MothershipStreamV1TextChannel = { - assistant: 'assistant', - thinking: 'thinking', -} as const + "assistant": "assistant", + "thinking": "thinking", +} as const; export const MothershipStreamV1ToolExecutor = { - go: 'go', - sim: 'sim', - client: 'client', -} as const + "go": "go", + "sim": "sim", + "client": "client", +} as const; export const MothershipStreamV1ToolMode = { - sync: 'sync', - async: 'async', -} as const + "sync": "sync", + "async": "async", +} as const; -export type MothershipStreamV1ToolOutcome = - | 'success' - | 'error' - | 'cancelled' - | 'skipped' - | 'rejected' +export type MothershipStreamV1ToolOutcome = "success" | "error" | "cancelled" | "skipped" | "rejected" export const MothershipStreamV1ToolOutcome = { - success: 'success', - error: 'error', - cancelled: 'cancelled', - skipped: 'skipped', - rejected: 'rejected', -} as const + "success": "success", + "error": "error", + "cancelled": "cancelled", + "skipped": "skipped", + "rejected": "rejected", +} as const; -export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result' +export type MothershipStreamV1ToolPhase = "call" | "args_delta" | "result" export const MothershipStreamV1ToolPhase = { - call: 'call', - args_delta: 'args_delta', - result: 'result', -} as const + "call": 
"call", + "args_delta": "args_delta", + "result": "result", +} as const; export const MothershipStreamV1ToolStatus = { - generating: 'generating', - executing: 'executing', - success: 'success', - error: 'error', - cancelled: 'cancelled', - skipped: 'skipped', - rejected: 'rejected', -} as const + "generating": "generating", + "executing": "executing", + "success": "success", + "error": "error", + "cancelled": "cancelled", + "skipped": "skipped", + "rejected": "rejected", +} as const; + diff --git a/apps/sim/lib/copilot/request/go/fetch.test.ts b/apps/sim/lib/copilot/request/go/fetch.test.ts new file mode 100644 index 00000000000..446bed79673 --- /dev/null +++ b/apps/sim/lib/copilot/request/go/fetch.test.ts @@ -0,0 +1,91 @@ +import { trace } from "@opentelemetry/api"; +import { + BasicTracerProvider, + InMemorySpanExporter, + SimpleSpanProcessor, +} from "@opentelemetry/sdk-trace-base"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { fetchGo } from "@/lib/copilot/request/go/fetch"; + +describe("fetchGo", () => { + const exporter = new InMemorySpanExporter(); + const provider = new BasicTracerProvider({ + spanProcessors: [new SimpleSpanProcessor(exporter)], + }); + + beforeEach(() => { + exporter.reset(); + trace.setGlobalTracerProvider(provider); + vi.restoreAllMocks(); + }); + + it("emits a client span with http.* attrs and injects traceparent", async () => { + const fetchMock = vi + .fn() + .mockImplementation(async (_url: string, init: RequestInit) => { + const headers = init.headers as Record; + expect(headers.traceparent).toMatch( + /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, + ); + return new Response("ok", { + status: 200, + headers: { "content-length": "2" }, + }); + }); + vi.stubGlobal("fetch", fetchMock); + + const res = await fetchGo("https://backend.example.com/api/copilot", { + method: "POST", + body: "payload", + operation: "stream", + attributes: { "copilot.leg": "sim_to_go" }, + }); + expect(res.status).toBe(200); + + const 
spans = exporter.getFinishedSpans(); + expect(spans).toHaveLength(1); + const attrs = spans[0].attributes; + expect(spans[0].name).toBe("sim → go /api/copilot"); + expect(attrs["http.method"]).toBe("POST"); + expect(attrs["http.url"]).toBe( + "https://backend.example.com/api/copilot", + ); + expect(attrs["http.target"]).toBe("/api/copilot"); + expect(attrs["http.status_code"]).toBe(200); + expect(attrs["copilot.operation"]).toBe("stream"); + expect(attrs["copilot.leg"]).toBe("sim_to_go"); + expect(typeof attrs["http.response.headers_ms"]).toBe("number"); + }); + + it("marks span as error on non-2xx response", async () => { + vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue(new Response("nope", { status: 500 })), + ); + + const res = await fetchGo("https://backend.example.com/api/tools/resume", { + method: "POST", + }); + expect(res.status).toBe(500); + + const spans = exporter.getFinishedSpans(); + expect(spans).toHaveLength(1); + expect(spans[0].status.code).toBe(2); + }); + + it("records exceptions when fetch throws", async () => { + vi.stubGlobal( + "fetch", + vi.fn().mockRejectedValue(new Error("network boom")), + ); + + await expect( + fetchGo("https://backend.example.com/api/traces", { method: "POST" }), + ).rejects.toThrow("network boom"); + + const spans = exporter.getFinishedSpans(); + expect(spans).toHaveLength(1); + expect(spans[0].status.code).toBe(2); + expect(spans[0].events.some((e) => e.name === "exception")).toBe(true); + }); +}); diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts new file mode 100644 index 00000000000..6560c84f272 --- /dev/null +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -0,0 +1,121 @@ +import { + context, + SpanStatusCode, + trace, + type Context, +} from "@opentelemetry/api"; +import { traceHeaders } from "@/lib/copilot/request/go/propagation"; + +// Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated +// before `instrumentation-node.ts` installs the 
TracerProvider under +// Next.js 16 + Turbopack dev, freezing a NoOp tracer and silently dropping +// every outbound Sim → Go span. Resolving per-call avoids the race. +const getTracer = () => trace.getTracer("sim-copilot-http", "1.0.0"); + +export interface OutboundFetchOptions extends RequestInit { + otelContext?: Context; + spanName?: string; + operation?: string; + attributes?: Record; +} + +/** + * Perform an outbound Sim → Go fetch wrapped in an OTel child span so each + * call shows up as a distinct segment in Jaeger, and propagates the W3C + * traceparent so the Go-side span joins the same trace. + * + * The span captures generic attributes (method, status, duration, response + * size, error code) so any future latency investigation — not just images or + * Bedrock — has uniform metadata to work with. + */ +export async function fetchGo( + url: string, + options: OutboundFetchOptions = {}, +): Promise { + const { + otelContext, + spanName, + operation, + attributes, + headers: providedHeaders, + ...init + } = options; + + const parsed = safeParseUrl(url); + const pathname = parsed?.pathname ?? url; + const method = (init.method ?? "GET").toUpperCase(); + const parentContext = otelContext ?? context.active(); + + const span = getTracer().startSpan( + spanName ?? `sim → go ${pathname}`, + { + attributes: { + "http.method": method, + "http.url": url, + "http.target": pathname, + "net.peer.name": parsed?.host ?? "", + "copilot.leg": "sim_to_go", + ...(operation ? { "copilot.operation": operation } : {}), + ...(attributes ?? 
{}), + }, + }, + parentContext, + ); + + const activeContext = trace.setSpan(parentContext, span); + const propagatedHeaders = traceHeaders({}, activeContext); + const mergedHeaders = { + ...(providedHeaders as Record | undefined), + ...propagatedHeaders, + }; + + const start = performance.now(); + try { + const response = await context.with(activeContext, () => + fetch(url, { + ...init, + method, + headers: mergedHeaders, + }), + ); + const elapsedMs = performance.now() - start; + const contentLength = Number(response.headers.get("content-length") ?? 0); + span.setAttribute("http.status_code", response.status); + span.setAttribute("http.response.headers_ms", Math.round(elapsedMs)); + if (contentLength > 0) { + span.setAttribute("http.response.content_length", contentLength); + } + if (response.status >= 400) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: `HTTP ${response.status}`, + }); + } else { + span.setStatus({ code: SpanStatusCode.OK }); + } + return response; + } catch (error) { + span.setAttribute( + "http.response.headers_ms", + Math.round(performance.now() - start), + ); + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? 
error : new Error(String(error)), + ); + throw error; + } finally { + span.end(); + } +} + +function safeParseUrl(url: string): URL | null { + try { + return new URL(url); + } catch { + return null; + } +} diff --git a/apps/sim/lib/copilot/request/go/propagation.ts b/apps/sim/lib/copilot/request/go/propagation.ts new file mode 100644 index 00000000000..2268049bc6d --- /dev/null +++ b/apps/sim/lib/copilot/request/go/propagation.ts @@ -0,0 +1,26 @@ +import { context, type Context } from "@opentelemetry/api"; +import { W3CTraceContextPropagator } from "@opentelemetry/core"; + +const propagator = new W3CTraceContextPropagator(); +const headerSetter = { + set(carrier: Record, key: string, value: string) { + carrier[key] = value; + }, +}; + +/** + * Injects W3C trace context (traceparent, tracestate) into outbound HTTP + * headers so Go-side spans join the same OTel trace tree as the calling + * Sim span. + * + * Usage: spread the result into your fetch headers: + * fetch(url, { headers: { ...myHeaders, ...traceHeaders() } }) + */ +export function traceHeaders( + carrier?: Record, + otelContext?: Context, +): Record { + const headers: Record = carrier ?? {}; + propagator.inject(otelContext ?? 
context.active(), headers, headerSetter); + return headers; +} diff --git a/apps/sim/lib/copilot/request/go/stream.test.ts b/apps/sim/lib/copilot/request/go/stream.test.ts index 64349636b51..1ea855d47da 100644 --- a/apps/sim/lib/copilot/request/go/stream.test.ts +++ b/apps/sim/lib/copilot/request/go/stream.test.ts @@ -1,7 +1,7 @@ /** * @vitest-environment node */ -import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, @@ -9,57 +9,65 @@ import { MothershipStreamV1ToolMode, MothershipStreamV1ToolOutcome, MothershipStreamV1ToolPhase, -} from '@/lib/copilot/generated/mothership-stream-v1' +} from "@/lib/copilot/generated/mothership-stream-v1"; import { buildPreviewContentUpdate, decodeJsonStringPrefix, extractEditContent, runStreamLoop, -} from '@/lib/copilot/request/go/stream' -import { createEvent } from '@/lib/copilot/request/session' -import { TraceCollector } from '@/lib/copilot/request/trace' -import type { ExecutionContext, StreamingContext } from '@/lib/copilot/request/types' +} from "@/lib/copilot/request/go/stream"; +import { createEvent } from "@/lib/copilot/request/session"; +import { + RequestTraceV1Outcome, + TraceCollector, +} from "@/lib/copilot/request/trace"; +import type { + ExecutionContext, + StreamingContext, +} from "@/lib/copilot/request/types"; function createSseResponse(events: unknown[]): Response { - const payload = events.map((event) => `data: ${JSON.stringify(event)}\n\n`).join('') + const payload = events + .map((event) => `data: ${JSON.stringify(event)}\n\n`) + .join(""); return new Response( new ReadableStream({ start(controller) { - controller.enqueue(new TextEncoder().encode(payload)) - controller.close() + controller.enqueue(new TextEncoder().encode(payload)); + controller.close(); }, }), { status: 200, headers: { - 'Content-Type': 'text/event-stream', + "Content-Type": 
"text/event-stream", }, - } - ) + }, + ); } function createRawSseResponse(payload: string): Response { return new Response( new ReadableStream({ start(controller) { - controller.enqueue(new TextEncoder().encode(payload)) - controller.close() + controller.enqueue(new TextEncoder().encode(payload)); + controller.close(); }, }), { status: 200, headers: { - 'Content-Type': 'text/event-stream', + "Content-Type": "text/event-stream", }, - } - ) + }, + ); } function createStreamingContext(): StreamingContext { return { - messageId: 'msg-1', - accumulatedContent: '', + messageId: "msg-1", + accumulatedContent: "", contentBlocks: [], toolCalls: new Map(), pendingToolPromises: new Map(), @@ -70,215 +78,310 @@ function createStreamingContext(): StreamingContext { subAgentParentStack: [], subAgentContent: {}, subAgentToolCalls: {}, - pendingContent: '', + pendingContent: "", streamComplete: false, wasAborted: false, errors: [], activeFileIntent: null, trace: new TraceCollector(), - } + }; } -describe('copilot go stream helpers', () => { +describe("copilot go stream helpers", () => { beforeEach(() => { - vi.stubGlobal('fetch', vi.fn()) - }) + vi.stubGlobal("fetch", vi.fn()); + }); afterEach(() => { - vi.unstubAllGlobals() - }) + vi.unstubAllGlobals(); + }); - it('decodes complete escapes and stops at incomplete unicode escapes', () => { - expect(decodeJsonStringPrefix('hello\\nworld')).toBe('hello\nworld') - expect(decodeJsonStringPrefix('emoji \\u263A')).toBe('emoji ☺') - expect(decodeJsonStringPrefix('partial \\u26')).toBe('partial ') - }) + it("decodes complete escapes and stops at incomplete unicode escapes", () => { + expect(decodeJsonStringPrefix("hello\\nworld")).toBe("hello\nworld"); + expect(decodeJsonStringPrefix("emoji \\u263A")).toBe("emoji ☺"); + expect(decodeJsonStringPrefix("partial \\u26")).toBe("partial "); + }); - it('extracts the streamed edit_content prefix from partial JSON', () => { - expect(extractEditContent('{"content":"hello\\nwor')).toBe('hello\nwor') 
- expect(extractEditContent('{"content":"tab\\tvalue"}')).toBe('tab\tvalue') - }) + it("extracts the streamed edit_content prefix from partial JSON", () => { + expect(extractEditContent('{"content":"hello\\nwor')).toBe("hello\nwor"); + expect(extractEditContent('{"content":"tab\\tvalue"}')).toBe("tab\tvalue"); + }); - it('emits full snapshots for append (sidebar viewer uses replace mode; no delta merge)', () => { - expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'append')).toEqual({ - content: 'hello world', - contentMode: 'snapshot', + it("emits full snapshots for append (sidebar viewer uses replace mode; no delta merge)", () => { + expect( + buildPreviewContentUpdate("hello", "hello world", 100, 200, "append"), + ).toEqual({ + content: "hello world", + contentMode: "snapshot", lastSnapshotAt: 200, - }) - }) + }); + }); - it('emits deltas for update when the preview extends the previous text', () => { - expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'update')).toEqual({ - content: ' world', - contentMode: 'delta', + it("emits deltas for update when the preview extends the previous text", () => { + expect( + buildPreviewContentUpdate("hello", "hello world", 100, 200, "update"), + ).toEqual({ + content: " world", + contentMode: "delta", lastSnapshotAt: 100, - }) - }) + }); + }); - it('falls back to snapshots for patches and divergent content', () => { - expect(buildPreviewContentUpdate('hello', 'goodbye', 100, 200, 'update')).toEqual({ - content: 'goodbye', - contentMode: 'snapshot', + it("falls back to snapshots for patches and divergent content", () => { + expect( + buildPreviewContentUpdate("hello", "goodbye", 100, 200, "update"), + ).toEqual({ + content: "goodbye", + contentMode: "snapshot", lastSnapshotAt: 200, - }) + }); - expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'patch')).toEqual({ - content: 'hello world', - contentMode: 'snapshot', + expect( + buildPreviewContentUpdate("hello", "hello world", 
100, 200, "patch"), + ).toEqual({ + content: "hello world", + contentMode: "snapshot", lastSnapshotAt: 200, - }) - }) + }); + }); - it('drops duplicate tool_result events before forwarding them', async () => { + it("drops duplicate tool_result events before forwarding them", async () => { const toolResult = createEvent({ - streamId: 'stream-1', - cursor: '1', + streamId: "stream-1", + cursor: "1", seq: 1, - requestId: 'req-1', + requestId: "req-1", type: MothershipStreamV1EventType.tool, payload: { - toolCallId: 'tool-result-dedupe', - toolName: 'search_online', + toolCallId: "tool-result-dedupe", + toolName: "search_online", executor: MothershipStreamV1ToolExecutor.sim, mode: MothershipStreamV1ToolMode.async, phase: MothershipStreamV1ToolPhase.result, success: true, - output: { value: 'ok' }, + output: { value: "ok" }, }, - }) + }); const complete = createEvent({ - streamId: 'stream-1', - cursor: '2', + streamId: "stream-1", + cursor: "2", seq: 2, - requestId: 'req-1', + requestId: "req-1", type: MothershipStreamV1EventType.complete, payload: { status: MothershipStreamV1CompletionStatus.complete, }, - }) + }); - vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([toolResult, toolResult, complete])) + vi.mocked(fetch).mockResolvedValueOnce( + createSseResponse([toolResult, toolResult, complete]), + ); - const onEvent = vi.fn() - const context = createStreamingContext() + const onEvent = vi.fn(); + const context = createStreamingContext(); const execContext: ExecutionContext = { - userId: 'user-1', - workflowId: 'workflow-1', - } + userId: "user-1", + workflowId: "workflow-1", + }; - await runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { - onEvent, - timeout: 1000, - }) + await runStreamLoop( + "https://example.com/mothership/stream", + {}, + context, + execContext, + { + onEvent, + timeout: 1000, + }, + ); expect(onEvent.mock.calls.map(([event]) => event.type)).toEqual([ MothershipStreamV1EventType.tool, 
MothershipStreamV1EventType.complete, - ]) + ]); expect(onEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.tool, payload: expect.objectContaining({ - toolCallId: 'tool-result-dedupe', + toolCallId: "tool-result-dedupe", phase: MothershipStreamV1ToolPhase.result, }), - }) - ) - expect(context.toolCalls.get('tool-result-dedupe')).toEqual( + }), + ); + expect(context.toolCalls.get("tool-result-dedupe")).toEqual( expect.objectContaining({ - id: 'tool-result-dedupe', - name: 'search_online', + id: "tool-result-dedupe", + name: "search_online", status: MothershipStreamV1ToolOutcome.success, - result: { success: true, output: { value: 'ok' } }, - }) - ) - }) + result: { success: true, output: { value: "ok" } }, + }), + ); + }); - it('fails closed when the shared stream ends before a terminal event', async () => { + it("fails closed when the shared stream ends before a terminal event", async () => { const textEvent = createEvent({ - streamId: 'stream-1', - cursor: '1', + streamId: "stream-1", + cursor: "1", seq: 1, - requestId: 'req-1', + requestId: "req-1", type: MothershipStreamV1EventType.text, payload: { - channel: 'assistant', - text: 'partial response', + channel: "assistant", + text: "partial response", }, - }) + }); - vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([textEvent])) + vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([textEvent])); - const context = createStreamingContext() + const context = createStreamingContext(); const execContext: ExecutionContext = { - userId: 'user-1', - workflowId: 'workflow-1', - } + userId: "user-1", + workflowId: "workflow-1", + }; await expect( - runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { - timeout: 1000, - }) - ).rejects.toThrow('Copilot backend stream ended before a terminal event') + runStreamLoop( + "https://example.com/mothership/stream", + {}, + context, + execContext, + { + timeout: 1000, + }, + ), + 
).rejects.toThrow("Copilot backend stream ended before a terminal event"); expect( context.errors.some((message) => - message.includes('Copilot backend stream ended before a terminal event') - ) - ).toBe(true) - }) + message.includes( + "Copilot backend stream ended before a terminal event", + ), + ), + ).toBe(true); + }); - it('fails closed when the shared stream receives an invalid event', async () => { + it("fails closed when the shared stream receives an invalid event", async () => { vi.mocked(fetch).mockResolvedValueOnce( createSseResponse([ { v: 1, type: MothershipStreamV1EventType.tool, seq: 1, - ts: '2026-01-01T00:00:00.000Z', - stream: { streamId: 'stream-1', cursor: '1' }, + ts: "2026-01-01T00:00:00.000Z", + stream: { streamId: "stream-1", cursor: "1" }, payload: { phase: MothershipStreamV1ToolPhase.result, }, }, - ]) - ) + ]), + ); - const context = createStreamingContext() + const context = createStreamingContext(); const execContext: ExecutionContext = { - userId: 'user-1', - workflowId: 'workflow-1', - } + userId: "user-1", + workflowId: "workflow-1", + }; await expect( - runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { - timeout: 1000, - }) - ).rejects.toThrow('Received invalid stream event on shared path') + runStreamLoop( + "https://example.com/mothership/stream", + {}, + context, + execContext, + { + timeout: 1000, + }, + ), + ).rejects.toThrow("Received invalid stream event on shared path"); expect( context.errors.some((message) => - message.includes('Received invalid stream event on shared path') - ) - ).toBe(true) - }) + message.includes("Received invalid stream event on shared path"), + ), + ).toBe(true); + }); - it('fails closed when the shared stream receives malformed JSON', async () => { + it("fails closed when the shared stream receives malformed JSON", async () => { vi.mocked(fetch).mockResolvedValueOnce( - createRawSseResponse('data: {"v":1,"type":"text","payload":\n\n') - ) + 
createRawSseResponse('data: {"v":1,"type":"text","payload":\n\n'), + ); - const context = createStreamingContext() + const context = createStreamingContext(); const execContext: ExecutionContext = { - userId: 'user-1', - workflowId: 'workflow-1', - } + userId: "user-1", + workflowId: "workflow-1", + }; await expect( - runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + runStreamLoop( + "https://example.com/mothership/stream", + {}, + context, + execContext, + { + timeout: 1000, + }, + ), + ).rejects.toThrow("Failed to parse SSE event JSON"); + expect( + context.errors.some((message) => + message.includes("Failed to parse SSE event JSON"), + ), + ).toBe(true); + }); + + it("records a split canonical request id and go trace id from the stream envelope", async () => { + vi.mocked(fetch).mockResolvedValueOnce( + createSseResponse([ + { + v: 1, + type: MothershipStreamV1EventType.text, + seq: 1, + ts: "2026-01-01T00:00:00.000Z", + stream: { streamId: "stream-1", cursor: "1" }, + trace: { + requestId: "sim-request-1", + goTraceId: "go-trace-1", + }, + payload: { + channel: "assistant", + text: "hello", + }, + }, + createEvent({ + streamId: "stream-1", + cursor: "2", + seq: 2, + requestId: "sim-request-1", + type: MothershipStreamV1EventType.complete, + payload: { + status: MothershipStreamV1CompletionStatus.complete, + }, + }), + ]), + ); + + const context = createStreamingContext(); + context.requestId = "sim-request-1"; + const execContext: ExecutionContext = { + userId: "user-1", + workflowId: "workflow-1", + }; + + await runStreamLoop( + "https://example.com/mothership/stream", + {}, + context, + execContext, + { timeout: 1000, - }) - ).rejects.toThrow('Failed to parse SSE event JSON') + }, + ); + + expect(context.requestId).toBe("sim-request-1"); expect( - context.errors.some((message) => message.includes('Failed to parse SSE event JSON')) - ).toBe(true) - }) -}) + context.trace.build({ + outcome: RequestTraceV1Outcome.success, + 
simRequestId: "sim-request-1", + }).goTraceId, + ).toBe("go-trace-1"); + }); +}); diff --git a/apps/sim/lib/copilot/request/go/stream.ts b/apps/sim/lib/copilot/request/go/stream.ts index 32d1c47b15b..1844f08c8f9 100644 --- a/apps/sim/lib/copilot/request/go/stream.ts +++ b/apps/sim/lib/copilot/request/go/stream.ts @@ -1,80 +1,93 @@ -import { createLogger } from '@sim/logger' -import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants' -import { MothershipStreamV1SpanLifecycleEvent } from '@/lib/copilot/generated/mothership-stream-v1' +import type { Context } from "@opentelemetry/api"; +import { createLogger } from "@sim/logger"; +import { ORCHESTRATION_TIMEOUT_MS } from "@/lib/copilot/constants"; +import { fetchGo } from "@/lib/copilot/request/go/fetch"; +import { MothershipStreamV1SpanLifecycleEvent } from "@/lib/copilot/generated/mothership-stream-v1"; import { buildPreviewContentUpdate, createFilePreviewAdapterState, decodeJsonStringPrefix, extractEditContent, processFilePreviewStreamEvent, -} from '@/lib/copilot/request/go/file-preview-adapter' -import { FatalSseEventError, processSSEStream } from '@/lib/copilot/request/go/parser' +} from "@/lib/copilot/request/go/file-preview-adapter"; +import { + FatalSseEventError, + processSSEStream, +} from "@/lib/copilot/request/go/parser"; import { handleSubagentRouting, sseHandlers, subAgentHandlers, -} from '@/lib/copilot/request/handlers' +} from "@/lib/copilot/request/handlers"; import { eventToStreamEvent, isSubagentSpanStreamEvent, parsePersistedStreamEventEnvelope, -} from '@/lib/copilot/request/session' -import { shouldSkipToolCallEvent, shouldSkipToolResultEvent } from '@/lib/copilot/request/sse-utils' +} from "@/lib/copilot/request/session"; +import { + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from "@/lib/copilot/request/sse-utils"; import type { ExecutionContext, OrchestratorOptions, StreamEvent, StreamingContext, -} from '@/lib/copilot/request/types' +} from 
"@/lib/copilot/request/types";
 
-const logger = createLogger('CopilotGoStream')
+const logger = createLogger("CopilotGoStream");
 
-export { buildPreviewContentUpdate, decodeJsonStringPrefix, extractEditContent }
+export {
+  buildPreviewContentUpdate,
+  decodeJsonStringPrefix,
+  extractEditContent,
+};
 
-type JsonRecord = Record<string, unknown>
+type JsonRecord = Record<string, unknown>;
 
 type SubagentSpanData = {
-  pending?: boolean
-  toolCallId?: string
-}
+  pending?: boolean;
+  toolCallId?: string;
+};
 
 function asJsonRecord(value: unknown): JsonRecord | undefined {
-  return value && typeof value === 'object' && !Array.isArray(value)
+  return value && typeof value === "object" && !Array.isArray(value)
     ? (value as JsonRecord)
-    : undefined
+    : undefined;
 }
 
 function parseSubagentSpanData(value: unknown): SubagentSpanData | undefined {
-  const data = asJsonRecord(value)
+  const data = asJsonRecord(value);
   if (!data) {
-    return undefined
+    return undefined;
   }
-  const toolCallId = typeof data.tool_call_id === 'string' ? data.tool_call_id : undefined
-  const pending = typeof data.pending === 'boolean' ? data.pending : undefined
+  const toolCallId =
+    typeof data.tool_call_id === "string" ? data.tool_call_id : undefined;
+  const pending = typeof data.pending === "boolean" ? data.pending : undefined;
   return {
     ...(toolCallId ? { toolCallId } : {}),
     ...(pending !== undefined ? 
{ pending } : {}), - } + }; } export class CopilotBackendError extends Error { - status?: number - body?: string + status?: number; + body?: string; constructor(message: string, options?: { status?: number; body?: string }) { - super(message) - this.name = 'CopilotBackendError' - this.status = options?.status - this.body = options?.body + super(message); + this.name = "CopilotBackendError"; + this.status = options?.status; + this.body = options?.body; } } export class BillingLimitError extends Error { constructor(public readonly userId: string) { - super('Usage limit reached') - this.name = 'BillingLimitError' + super("Usage limit reached"); + this.name = "BillingLimitError"; } } @@ -86,11 +99,15 @@ export interface StreamLoopOptions extends OrchestratorOptions { * Called for each normalized event BEFORE standard handler dispatch. * Return true to skip the default handler for this event. */ - onBeforeDispatch?: (event: StreamEvent, context: StreamingContext) => boolean | undefined + onBeforeDispatch?: ( + event: StreamEvent, + context: StreamingContext, + ) => boolean | undefined; /** * Called when the Go backend's trace ID (go_trace_id) is first received via SSE. 
*/ - onGoTraceId?: (goTraceId: string) => void + onGoTraceId?: (goTraceId: string) => void; + otelContext?: Context; } /** @@ -105,85 +122,132 @@ export async function runStreamLoop( fetchOptions: RequestInit, context: StreamingContext, execContext: ExecutionContext, - options: StreamLoopOptions + options: StreamLoopOptions, ): Promise { - const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options - const filePreviewAdapterState = createFilePreviewAdapterState() + const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options; + const filePreviewAdapterState = createFilePreviewAdapterState(); + const pathname = new URL(fetchUrl).pathname; + const requestBodyBytes = estimateBodyBytes(fetchOptions.body); const fetchSpan = context.trace.startSpan( - `HTTP Request → ${new URL(fetchUrl).pathname}`, - 'sim.http.fetch', - { url: fetchUrl } - ) - const response = await fetch(fetchUrl, { + `HTTP Request → ${pathname}`, + "sim.http.fetch", + { + url: fetchUrl, + method: fetchOptions.method ?? "GET", + requestBodyBytes, + }, + ); + const fetchStart = performance.now(); + const response = await fetchGo(fetchUrl, { ...fetchOptions, signal: abortSignal, - }) + otelContext: options.otelContext, + spanName: `sim → go ${pathname}`, + operation: "stream", + attributes: { + "copilot.stream": true, + ...(requestBodyBytes + ? { "http.request.content_length": requestBodyBytes } + : {}), + }, + }); + const headersElapsedMs = Math.round(performance.now() - fetchStart); + fetchSpan.attributes = { + ...(fetchSpan.attributes ?? 
{}), + status: response.status, + headersMs: headersElapsedMs, + }; if (!response.ok) { - context.trace.endSpan(fetchSpan, 'error') - const errorText = await response.text().catch(() => '') + context.trace.endSpan(fetchSpan, "error"); + const errorText = await response.text().catch(() => ""); if (response.status === 402) { - throw new BillingLimitError(execContext.userId) + throw new BillingLimitError(execContext.userId); } throw new CopilotBackendError( `Copilot backend error (${response.status}): ${errorText || response.statusText}`, - { status: response.status, body: errorText || response.statusText } - ) + { status: response.status, body: errorText || response.statusText }, + ); } if (!response.body) { - context.trace.endSpan(fetchSpan, 'error') - throw new CopilotBackendError('Copilot backend response missing body') + context.trace.endSpan(fetchSpan, "error"); + throw new CopilotBackendError("Copilot backend response missing body"); } - context.trace.endSpan(fetchSpan) - const reader = response.body.getReader() - const decoder = new TextDecoder() + context.trace.endSpan(fetchSpan); + + const bodySpan = context.trace.startSpan( + `SSE Body → ${pathname}`, + "sim.http.stream_body", + { + url: fetchUrl, + method: fetchOptions.method ?? 
"GET", + }, + ); + const bodyStart = performance.now(); + let firstEventMs: number | undefined; + let eventsReceived = 0; + let bytesReceived = 0; + let endedOn: string = "terminal"; + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); const timeoutId = setTimeout(() => { - context.errors.push('Request timed out') - context.streamComplete = true - reader.cancel().catch(() => {}) - }, timeout) + context.errors.push("Request timed out"); + context.streamComplete = true; + endedOn = "timeout"; + reader.cancel().catch(() => {}); + }, timeout); try { await processSSEStream(reader, decoder, abortSignal, async (raw) => { + if (eventsReceived === 0) { + firstEventMs = Math.round(performance.now() - bodyStart); + } + eventsReceived += 1; + try { + bytesReceived += JSON.stringify(raw ?? null).length; + } catch { + // non-serializable event; skip byte accounting + } if (abortSignal?.aborted) { - context.wasAborted = true - return true + context.wasAborted = true; + return true; } - const parsedEvent = parsePersistedStreamEventEnvelope(raw) + const parsedEvent = parsePersistedStreamEventEnvelope(raw); if (!parsedEvent.ok) { const detail = [parsedEvent.message, ...(parsedEvent.errors ?? 
[])] .filter(Boolean) - .join('; ') - const failureMessage = `Received invalid stream event on shared path: ${detail}` - context.errors.push(failureMessage) - logger.error('Received invalid stream event on shared path', { + .join("; "); + const failureMessage = `Received invalid stream event on shared path: ${detail}`; + context.errors.push(failureMessage); + logger.error("Received invalid stream event on shared path", { reason: parsedEvent.reason, message: parsedEvent.message, errors: parsedEvent.errors, - }) - throw new FatalSseEventError(failureMessage) + }); + throw new FatalSseEventError(failureMessage); } - const envelope = parsedEvent.event - const streamEvent = eventToStreamEvent(envelope) + const envelope = parsedEvent.event; + const streamEvent = eventToStreamEvent(envelope); if (envelope.trace?.requestId) { - const prev = context.requestId - context.requestId = envelope.trace.requestId - context.trace.setGoTraceId(envelope.trace.requestId) - if (envelope.trace.requestId !== prev) { - options.onGoTraceId?.(envelope.trace.requestId) - } + const goTraceId = envelope.trace.goTraceId || envelope.trace.requestId; + context.trace.setGoTraceId(goTraceId); + options.onGoTraceId?.(goTraceId); } - if (shouldSkipToolCallEvent(streamEvent) || shouldSkipToolResultEvent(streamEvent)) { - return + if ( + shouldSkipToolCallEvent(streamEvent) || + shouldSkipToolResultEvent(streamEvent) + ) { + return; } await processFilePreviewStreamEvent({ @@ -193,112 +257,166 @@ export async function runStreamLoop( execContext, options, state: filePreviewAdapterState, - }) + }); try { - await options.onEvent?.(streamEvent) + await options.onEvent?.(streamEvent); } catch (error) { - logger.warn('Failed to forward stream event', { + logger.warn("Failed to forward stream event", { type: streamEvent.type, error: error instanceof Error ? error.message : String(error), - }) + }); } // Yield a macrotask so Node.js flushes the HTTP response buffer to // the browser. 
Microtask yields (await Promise.resolve()) are not // enough — the I/O layer needs a full event loop tick to write. - await new Promise((resolve) => setImmediate(resolve)) + await new Promise((resolve) => setImmediate(resolve)); if (options.onBeforeDispatch?.(streamEvent, context)) { - return context.streamComplete || undefined + return context.streamComplete || undefined; } if (isSubagentSpanStreamEvent(streamEvent)) { - const spanData = parseSubagentSpanData(streamEvent.payload.data) - const toolCallId = streamEvent.scope?.parentToolCallId || spanData?.toolCallId - const subagentName = streamEvent.payload.agent - const spanEvt = streamEvent.payload.event - const isPendingPause = spanData?.pending === true + const spanData = parseSubagentSpanData(streamEvent.payload.data); + const toolCallId = + streamEvent.scope?.parentToolCallId || spanData?.toolCallId; + const subagentName = streamEvent.payload.agent; + const spanEvt = streamEvent.payload.event; + const isPendingPause = spanData?.pending === true; if (spanEvt === MothershipStreamV1SpanLifecycleEvent.start) { - const lastParent = context.subAgentParentStack[context.subAgentParentStack.length - 1] - const lastBlock = context.contentBlocks[context.contentBlocks.length - 1] + const lastParent = + context.subAgentParentStack[context.subAgentParentStack.length - 1]; + const lastBlock = + context.contentBlocks[context.contentBlocks.length - 1]; if (toolCallId) { if (lastParent !== toolCallId) { - context.subAgentParentStack.push(toolCallId) + context.subAgentParentStack.push(toolCallId); } - context.subAgentParentToolCallId = toolCallId - context.subAgentContent[toolCallId] ??= '' - context.subAgentToolCalls[toolCallId] ??= [] + context.subAgentParentToolCallId = toolCallId; + context.subAgentContent[toolCallId] ??= ""; + context.subAgentToolCalls[toolCallId] ??= []; } if ( subagentName && !( lastParent === toolCallId && - lastBlock?.type === 'subagent' && + lastBlock?.type === "subagent" && lastBlock.content === 
subagentName ) ) { context.contentBlocks.push({ - type: 'subagent', + type: "subagent", content: subagentName, timestamp: Date.now(), - }) + }); } - return + return; } if (spanEvt === MothershipStreamV1SpanLifecycleEvent.end) { if (isPendingPause) { - return + return; } if (context.subAgentParentStack.length > 0) { - context.subAgentParentStack.pop() + context.subAgentParentStack.pop(); } else { - logger.warn('subagent end without matching start') + logger.warn("subagent end without matching start"); } context.subAgentParentToolCallId = context.subAgentParentStack.length > 0 - ? context.subAgentParentStack[context.subAgentParentStack.length - 1] - : undefined - return + ? context.subAgentParentStack[ + context.subAgentParentStack.length - 1 + ] + : undefined; + return; } } if (handleSubagentRouting(streamEvent, context)) { - const handler = subAgentHandlers[streamEvent.type] + const handler = subAgentHandlers[streamEvent.type]; if (handler) { - await handler(streamEvent, context, execContext, options) + await handler(streamEvent, context, execContext, options); } - return context.streamComplete || undefined + return context.streamComplete || undefined; } - const handler = sseHandlers[streamEvent.type] + const handler = sseHandlers[streamEvent.type]; if (handler) { - await handler(streamEvent, context, execContext, options) + await handler(streamEvent, context, execContext, options); } - return context.streamComplete || undefined - }) - - if (!context.streamComplete && !abortSignal?.aborted && !context.wasAborted) { - const streamPath = new URL(fetchUrl).pathname - const message = `Copilot backend stream ended before a terminal event on ${streamPath}` - context.errors.push(message) - logger.error('Copilot backend stream ended before a terminal event', { + return context.streamComplete || undefined; + }); + + if ( + !context.streamComplete && + !abortSignal?.aborted && + !context.wasAborted + ) { + const streamPath = new URL(fetchUrl).pathname; + const message = 
`Copilot backend stream ended before a terminal event on ${streamPath}`; + context.errors.push(message); + logger.error("Copilot backend stream ended before a terminal event", { path: streamPath, requestId: context.requestId, messageId: context.messageId, - }) - throw new CopilotBackendError(message, { status: 503 }) + }); + endedOn = "closed_no_terminal"; + throw new CopilotBackendError(message, { status: 503 }); } } catch (error) { - if (error instanceof FatalSseEventError && !context.errors.includes(error.message)) { - context.errors.push(error.message) + if ( + error instanceof FatalSseEventError && + !context.errors.includes(error.message) + ) { + context.errors.push(error.message); + } + if (endedOn === "terminal") { + endedOn = + error instanceof CopilotBackendError + ? "backend_error" + : error instanceof BillingLimitError + ? "billing_limit" + : "error"; } - throw error + throw error; } finally { if (abortSignal?.aborted) { - context.wasAborted = true - await reader.cancel().catch(() => {}) + context.wasAborted = true; + await reader.cancel().catch(() => {}); + if (endedOn === "terminal") { + endedOn = "aborted"; + } } - clearTimeout(timeoutId) + clearTimeout(timeoutId); + + const bodyDurationMs = Math.round(performance.now() - bodyStart); + bodySpan.attributes = { + ...(bodySpan.attributes ?? {}), + eventsReceived, + bytesReceived, + firstEventMs, + endedOn, + durationMs: bodyDurationMs, + }; + context.trace.endSpan( + bodySpan, + endedOn === "terminal" ? "ok" : endedOn === "aborted" ? 
"cancelled" : "error", + ); + } +} + +function estimateBodyBytes(body: BodyInit | null | undefined): number { + if (!body) { + return 0; + } + if (typeof body === "string") { + return body.length; + } + if (body instanceof ArrayBuffer) { + return body.byteLength; + } + if (ArrayBuffer.isView(body)) { + return body.byteLength; } + return 0; } diff --git a/apps/sim/lib/copilot/request/handlers/span.ts b/apps/sim/lib/copilot/request/handlers/span.ts index e684b232582..7c09d8fc5b1 100644 --- a/apps/sim/lib/copilot/request/handlers/span.ts +++ b/apps/sim/lib/copilot/request/handlers/span.ts @@ -1,3 +1,70 @@ +import { + MothershipStreamV1SpanLifecycleEvent, + MothershipStreamV1SpanPayloadKind, +} from '@/lib/copilot/generated/mothership-stream-v1' import type { StreamHandler } from './types' -export const handleSpanEvent: StreamHandler = () => {} +/** + * Mirror Go-emitted span lifecycle events onto the Sim-side TraceCollector. + * + * Go publishes `span` events for subagent lifecycles and structured-result + * payloads. For subagents, the start/end pair is also used for UI routing + * elsewhere; here we additionally record a named span on the trace collector + * so the final RequestTraceV1 report shows the full nested structure without + * requiring the reader to inspect the raw envelope stream. + */ +export const handleSpanEvent: StreamHandler = (event, context) => { + if (event.type !== 'span') { + return + } + + const payload = event.payload as { + kind?: string + event?: string + agent?: string + data?: unknown + } + const kind = payload?.kind ?? '' + const evt = payload?.event ?? '' + + if (kind === MothershipStreamV1SpanPayloadKind.subagent) { + const scopeAgent = + typeof payload.agent === 'string' && payload.agent ? 
payload.agent : 'subagent' + if (evt === MothershipStreamV1SpanLifecycleEvent.start) { + const span = context.trace.startSpan( + `subagent:${scopeAgent}`, + 'go.subagent', + { + agent: scopeAgent, + parentToolCallId: event.scope?.parentToolCallId, + }, + ) + context.subAgentTraceSpans ??= new Map() + context.subAgentTraceSpans.set(scopeAgent + ':' + (event.scope?.parentToolCallId || ''), span) + } else if (evt === MothershipStreamV1SpanLifecycleEvent.end) { + const key = scopeAgent + ':' + (event.scope?.parentToolCallId || '') + const span = context.subAgentTraceSpans?.get(key) + if (span) { + context.trace.endSpan(span, 'ok') + context.subAgentTraceSpans?.delete(key) + } + } + return + } + + if ( + kind === MothershipStreamV1SpanPayloadKind.structured_result || + kind === MothershipStreamV1SpanPayloadKind.subagent_result + ) { + const span = context.trace.startSpan( + `${kind}:${payload.agent ?? 'main'}`, + `go.${kind}`, + { + agent: payload.agent, + hasData: payload.data !== undefined, + }, + ) + context.trace.endSpan(span, 'ok') + return + } +} diff --git a/apps/sim/lib/copilot/request/lifecycle/finalize.ts b/apps/sim/lib/copilot/request/lifecycle/finalize.ts index 676fa7bd176..f242c30831e 100644 --- a/apps/sim/lib/copilot/request/lifecycle/finalize.ts +++ b/apps/sim/lib/copilot/request/lifecycle/finalize.ts @@ -1,3 +1,4 @@ +import { SpanStatusCode, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' import { updateRunStatus } from '@/lib/copilot/async-runs/repository' import { @@ -8,6 +9,8 @@ import type { StreamWriter } from '@/lib/copilot/request/session' import type { OrchestratorResult } from '@/lib/copilot/request/types' const logger = createLogger('CopilotStreamFinalize') +// Lazy tracer resolution: see comment in lib/copilot/request/otel.ts. +const getTracer = () => trace.getTracer('sim-copilot-finalize', '1.0.0') /** * Single finalization path for stream results. 
@@ -21,13 +24,35 @@ export async function finalizeStream( aborted: boolean, requestId: string ): Promise { - if (aborted) { - return handleAborted(result, publisher, runId, requestId) - } - if (!result.success) { - return handleError(result, publisher, runId, requestId) + const outcome = aborted ? 'aborted' : result.success ? 'success' : 'error' + const span = getTracer().startSpan('copilot.finalize_stream', { + attributes: { + 'copilot.finalize.outcome': outcome, + 'copilot.run.id': runId, + 'copilot.request.id': requestId, + 'copilot.result.tool_calls': result.toolCalls?.length ?? 0, + 'copilot.result.content_blocks': result.contentBlocks?.length ?? 0, + 'copilot.result.content_length': result.content?.length ?? 0, + 'copilot.publisher.saw_complete': publisher.sawComplete, + 'copilot.publisher.client_disconnected': publisher.clientDisconnected, + }, + }) + try { + if (aborted) { + await handleAborted(result, publisher, runId, requestId) + } else if (!result.success) { + span.setStatus({ code: SpanStatusCode.ERROR, message: result.error || 'orchestration failed' }) + await handleError(result, publisher, runId, requestId) + } else { + await handleSuccess(publisher, runId, requestId) + } + } catch (error) { + span.recordException(error instanceof Error ? 
error : new Error(String(error))) + span.setStatus({ code: SpanStatusCode.ERROR, message: 'finalize threw' }) + throw error + } finally { + span.end() } - return handleSuccess(publisher, runId, requestId) } async function handleAborted( diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.test.ts b/apps/sim/lib/copilot/request/lifecycle/headless.test.ts index 10b8f656689..49ada3d6574 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.test.ts +++ b/apps/sim/lib/copilot/request/lifecycle/headless.test.ts @@ -2,89 +2,96 @@ * @vitest-environment node */ -import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' -import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' -import type { OrchestratorResult } from '@/lib/copilot/request/types' +import { propagation, trace } from "@opentelemetry/api"; +import { W3CTraceContextPropagator } from "@opentelemetry/core"; +import { BasicTracerProvider } from "@opentelemetry/sdk-trace-base"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; +import type { OrchestratorResult } from "@/lib/copilot/request/types"; const { runCopilotLifecycle } = vi.hoisted(() => ({ runCopilotLifecycle: vi.fn(), -})) +})); -vi.mock('@/lib/copilot/request/lifecycle/run', () => ({ +vi.mock("@/lib/copilot/request/lifecycle/run", () => ({ runCopilotLifecycle, -})) +})); -import { runHeadlessCopilotLifecycle } from './headless' +import { runHeadlessCopilotLifecycle } from "./headless"; -function createLifecycleResult(overrides?: Partial): OrchestratorResult { +function createLifecycleResult( + overrides?: Partial, +): OrchestratorResult { return { success: true, - content: 'done', + content: "done", contentBlocks: [], toolCalls: [], - chatId: 'chat-1', + chatId: "chat-1", ...overrides, - } + }; } -describe('runHeadlessCopilotLifecycle', () => { +describe("runHeadlessCopilotLifecycle", () => { 
beforeEach(() => { + trace.setGlobalTracerProvider(new BasicTracerProvider()); + propagation.setGlobalPropagator(new W3CTraceContextPropagator()); vi.stubGlobal( - 'fetch', + "fetch", vi.fn().mockResolvedValue( new Response(null, { status: 200, - }) - ) - ) - }) + }), + ), + ); + }); afterEach(() => { - vi.clearAllMocks() - vi.unstubAllGlobals() - }) + vi.clearAllMocks(); + vi.unstubAllGlobals(); + }); - it('reports a successful headless trace', async () => { + it("reports a successful headless trace", async () => { runCopilotLifecycle.mockResolvedValueOnce( createLifecycleResult({ usage: { prompt: 10, completion: 5 }, cost: { input: 1, output: 2, total: 3 }, - }) - ) + }), + ); const result = await runHeadlessCopilotLifecycle( { - message: 'hello', - messageId: 'req-1', + message: "hello", + messageId: "req-1", }, { - userId: 'user-1', - chatId: 'chat-1', - workflowId: 'workflow-1', - goRoute: '/api/mothership/execute', + userId: "user-1", + chatId: "chat-1", + workflowId: "workflow-1", + goRoute: "/api/mothership/execute", interactive: false, - } - ) + }, + ); - expect(result.success).toBe(true) + expect(result.success).toBe(true); expect(runCopilotLifecycle).toHaveBeenCalledWith( - expect.objectContaining({ messageId: 'req-1' }), + expect.objectContaining({ messageId: "req-1" }), expect.objectContaining({ - simRequestId: 'req-1', + simRequestId: "req-1", trace: expect.any(Object), - chatId: 'chat-1', - }) - ) - - expect(fetch).toHaveBeenCalledTimes(1) - const [url, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] - expect(url).toContain('/api/traces') - const body = JSON.parse(String(init.body)) + chatId: "chat-1", + }), + ); + + expect(fetch).toHaveBeenCalledTimes(1); + const [url, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; + expect(url).toContain("/api/traces"); + const body = JSON.parse(String(init.body)); expect(body).toEqual( expect.objectContaining({ - simRequestId: 'req-1', + simRequestId: "req-1", outcome: 
RequestTraceV1Outcome.success, - chatId: 'chat-1', + chatId: "chat-1", usage: { inputTokens: 10, outputTokens: 5, @@ -93,89 +100,131 @@ describe('runHeadlessCopilotLifecycle', () => { rawTotalCost: 3, billedTotalCost: 3, }, - }) - ) - }) + }), + ); + }); - it('reports an error trace when the lifecycle result is unsuccessful', async () => { + it("reports an error trace when the lifecycle result is unsuccessful", async () => { runCopilotLifecycle.mockResolvedValueOnce( createLifecycleResult({ success: false, - error: 'failed', - }) - ) + error: "failed", + }), + ); const result = await runHeadlessCopilotLifecycle( { - message: 'hello', - messageId: 'req-2', + message: "hello", + messageId: "req-2", }, { - userId: 'user-1', - chatId: 'chat-1', - workflowId: 'workflow-1', - goRoute: '/api/mothership/execute', + userId: "user-1", + chatId: "chat-1", + workflowId: "workflow-1", + goRoute: "/api/mothership/execute", interactive: false, - } - ) + }, + ); - expect(result.success).toBe(false) - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] - const body = JSON.parse(String(init.body)) - expect(body.outcome).toBe(RequestTraceV1Outcome.error) - }) + expect(result.success).toBe(false); + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; + const body = JSON.parse(String(init.body)); + expect(body.outcome).toBe(RequestTraceV1Outcome.error); + }); - it('prefers an explicit simRequestId over the payload messageId', async () => { - runCopilotLifecycle.mockResolvedValueOnce(createLifecycleResult()) + it("prefers an explicit simRequestId over the payload messageId", async () => { + runCopilotLifecycle.mockResolvedValueOnce(createLifecycleResult()); await runHeadlessCopilotLifecycle( { - message: 'hello', - messageId: 'message-req-id', + message: "hello", + messageId: "message-req-id", }, { - userId: 'user-1', - chatId: 'chat-1', - workflowId: 'workflow-1', - simRequestId: 'workflow-request-id', - goRoute: '/api/mothership/execute', + 
userId: "user-1", + chatId: "chat-1", + workflowId: "workflow-1", + simRequestId: "workflow-request-id", + goRoute: "/api/mothership/execute", interactive: false, - } - ) + }, + ); expect(runCopilotLifecycle).toHaveBeenCalledWith( - expect.objectContaining({ messageId: 'message-req-id' }), + expect.objectContaining({ messageId: "message-req-id" }), expect.objectContaining({ - simRequestId: 'workflow-request-id', - }) - ) + simRequestId: "workflow-request-id", + }), + ); + + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; + const body = JSON.parse(String(init.body)); + expect(body.simRequestId).toBe("workflow-request-id"); + }); + + it("passes an OTel context to the lifecycle and trace report", async () => { + let lifecycleTraceparent = ""; + runCopilotLifecycle.mockImplementationOnce(async (_payload, options) => { + const { traceHeaders } = + await import("@/lib/copilot/request/go/propagation"); + lifecycleTraceparent = + traceHeaders({}, options.otelContext).traceparent ?? ""; + return createLifecycleResult(); + }); - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] - const body = JSON.parse(String(init.body)) - expect(body.simRequestId).toBe('workflow-request-id') - }) - - it('reports an error trace when the lifecycle throws', async () => { - runCopilotLifecycle.mockRejectedValueOnce(new Error('kaboom')) + await runHeadlessCopilotLifecycle( + { + message: "hello", + messageId: "req-otel", + }, + { + userId: "user-1", + chatId: "chat-1", + workflowId: "workflow-1", + goRoute: "/api/mothership/execute", + interactive: false, + }, + ); + + expect(lifecycleTraceparent).toMatch( + /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, + ); + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; + const headers = init.headers as Record; + // The outbound trace report now runs inside its own OTel child span, so + // traceparent has the same trace-id as the lifecycle but a different + // span-id. 
Both must stay on the same trace. + const lifecycleTraceId = lifecycleTraceparent.split("-")[1]; + expect(headers.traceparent).toMatch( + /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, + ); + expect(headers.traceparent.split("-")[1]).toBe(lifecycleTraceId); + expect(headers.traceparent.split("-")[2]).not.toBe( + lifecycleTraceparent.split("-")[2], + ); + }); + + it("reports an error trace when the lifecycle throws", async () => { + runCopilotLifecycle.mockRejectedValueOnce(new Error("kaboom")); await expect( runHeadlessCopilotLifecycle( { - message: 'hello', - messageId: 'req-3', + message: "hello", + messageId: "req-3", }, { - userId: 'user-1', - chatId: 'chat-1', - workflowId: 'workflow-1', - goRoute: '/api/mothership/execute', + userId: "user-1", + chatId: "chat-1", + workflowId: "workflow-1", + goRoute: "/api/mothership/execute", interactive: false, - } - ) - ).rejects.toThrow('kaboom') - - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] - const body = JSON.parse(String(init.body)) - expect(body.outcome).toBe(RequestTraceV1Outcome.error) - }) -}) + }, + ), + ).rejects.toThrow("kaboom"); + + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; + const body = JSON.parse(String(init.body)); + expect(body.outcome).toBe(RequestTraceV1Outcome.error); + }); +}); diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.ts b/apps/sim/lib/copilot/request/lifecycle/headless.ts index d3e3be12aa4..0083ff51692 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.ts +++ b/apps/sim/lib/copilot/request/lifecycle/headless.ts @@ -1,83 +1,104 @@ -import { createLogger } from '@sim/logger' -import type { RequestTraceV1Outcome as RequestTraceOutcome } from '@/lib/copilot/generated/request-trace-v1' +import { createLogger } from "@sim/logger"; +import type { RequestTraceV1Outcome as RequestTraceOutcome } from "@/lib/copilot/generated/request-trace-v1"; import { RequestTraceV1Outcome, RequestTraceV1SpanStatus, -} from 
'@/lib/copilot/generated/request-trace-v1' -import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' -import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' -import { reportTrace, TraceCollector } from '@/lib/copilot/request/trace' -import type { OrchestratorResult } from '@/lib/copilot/request/types' -import { generateId } from '@/lib/core/utils/uuid' +} from "@/lib/copilot/generated/request-trace-v1"; +import type { CopilotLifecycleOptions } from "@/lib/copilot/request/lifecycle/run"; +import { runCopilotLifecycle } from "@/lib/copilot/request/lifecycle/run"; +import { withCopilotOtelContext } from "@/lib/copilot/request/otel"; +import { reportTrace, TraceCollector } from "@/lib/copilot/request/trace"; +import type { OrchestratorResult } from "@/lib/copilot/request/types"; +import { generateId } from "@/lib/core/utils/uuid"; -const logger = createLogger('CopilotHeadlessLifecycle') +const logger = createLogger("CopilotHeadlessLifecycle"); export async function runHeadlessCopilotLifecycle( requestPayload: Record, - options: CopilotLifecycleOptions + options: CopilotLifecycleOptions, ): Promise { const simRequestId = - typeof options.simRequestId === 'string' && options.simRequestId.length > 0 + typeof options.simRequestId === "string" && options.simRequestId.length > 0 ? options.simRequestId - : typeof requestPayload.messageId === 'string' && requestPayload.messageId.length > 0 + : typeof requestPayload.messageId === "string" && + requestPayload.messageId.length > 0 ? 
requestPayload.messageId - : generateId() - const trace = new TraceCollector() - const requestSpan = trace.startSpan('Headless Mothership Request', 'request', { - route: options.goRoute, - workflowId: options.workflowId, - workspaceId: options.workspaceId, - chatId: options.chatId, - }) + : generateId(); + const trace = new TraceCollector(); + const requestSpan = trace.startSpan( + "Headless Mothership Request", + "request", + { + route: options.goRoute, + workflowId: options.workflowId, + workspaceId: options.workspaceId, + chatId: options.chatId, + }, + ); - let result: OrchestratorResult | undefined - let outcome: RequestTraceOutcome = RequestTraceV1Outcome.error + let result: OrchestratorResult | undefined; + let outcome: RequestTraceOutcome = RequestTraceV1Outcome.error; - try { - result = await runCopilotLifecycle(requestPayload, { - ...options, - trace, - simRequestId, - }) - outcome = options.abortSignal?.aborted - ? RequestTraceV1Outcome.cancelled - : result.success - ? RequestTraceV1Outcome.success - : RequestTraceV1Outcome.error - return result - } catch (error) { - outcome = options.abortSignal?.aborted - ? RequestTraceV1Outcome.cancelled - : RequestTraceV1Outcome.error - throw error - } finally { - trace.endSpan( - requestSpan, - outcome === RequestTraceV1Outcome.success - ? RequestTraceV1SpanStatus.ok - : outcome === RequestTraceV1Outcome.cancelled - ? RequestTraceV1SpanStatus.cancelled - : RequestTraceV1SpanStatus.error - ) - - try { - await reportTrace( - trace.build({ - outcome, + return withCopilotOtelContext( + { + requestId: simRequestId, + route: options.goRoute, + chatId: options.chatId, + workflowId: options.workflowId, + executionId: options.executionId, + runId: options.runId, + transport: "headless", + }, + async (otelContext) => { + try { + result = await runCopilotLifecycle(requestPayload, { + ...options, + trace, simRequestId, - chatId: result?.chatId ?? 
options.chatId, - runId: options.runId, - executionId: options.executionId, - usage: result?.usage, - cost: result?.cost, - }) - ) - } catch (error) { - logger.warn('Failed to report headless trace', { - simRequestId, - chatId: result?.chatId ?? options.chatId, - error: error instanceof Error ? error.message : String(error), - }) - } - } + otelContext, + }); + outcome = options.abortSignal?.aborted + ? RequestTraceV1Outcome.cancelled + : result.success + ? RequestTraceV1Outcome.success + : RequestTraceV1Outcome.error; + return result; + } catch (error) { + outcome = options.abortSignal?.aborted + ? RequestTraceV1Outcome.cancelled + : RequestTraceV1Outcome.error; + throw error; + } finally { + trace.endSpan( + requestSpan, + outcome === RequestTraceV1Outcome.success + ? RequestTraceV1SpanStatus.ok + : outcome === RequestTraceV1Outcome.cancelled + ? RequestTraceV1SpanStatus.cancelled + : RequestTraceV1SpanStatus.error, + ); + + try { + await reportTrace( + trace.build({ + outcome, + simRequestId, + chatId: result?.chatId ?? options.chatId, + runId: options.runId, + executionId: options.executionId, + usage: result?.usage, + cost: result?.cost, + }), + otelContext, + ); + } catch (error) { + logger.warn("Failed to report headless trace", { + simRequestId, + chatId: result?.chatId ?? options.chatId, + error: error instanceof Error ? 
error.message : String(error), + }); + } + } + }, + ); } diff --git a/apps/sim/lib/copilot/request/lifecycle/run.ts b/apps/sim/lib/copilot/request/lifecycle/run.ts index 1156d3b5928..260863bb6e3 100644 --- a/apps/sim/lib/copilot/request/lifecycle/run.ts +++ b/apps/sim/lib/copilot/request/lifecycle/run.ts @@ -1,61 +1,66 @@ -import { createLogger } from '@sim/logger' -import { createRunSegment, updateRunStatus } from '@/lib/copilot/async-runs/repository' -import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import type { Context } from "@opentelemetry/api"; +import { createLogger } from "@sim/logger"; +import { + createRunSegment, + updateRunStatus, +} from "@/lib/copilot/async-runs/repository"; +import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from "@/lib/copilot/constants"; import { MothershipStreamV1EventType, MothershipStreamV1RunKind, MothershipStreamV1ToolOutcome, -} from '@/lib/copilot/generated/mothership-stream-v1' -import { createStreamingContext } from '@/lib/copilot/request/context/request-context' -import { buildToolCallSummaries } from '@/lib/copilot/request/context/result' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import { createStreamingContext } from "@/lib/copilot/request/context/request-context"; +import { buildToolCallSummaries } from "@/lib/copilot/request/context/result"; import { BillingLimitError, CopilotBackendError, runStreamLoop, -} from '@/lib/copilot/request/go/stream' +} from "@/lib/copilot/request/go/stream"; import { getToolCallTerminalData, requireToolCallStateResult, setTerminalToolCallState, -} from '@/lib/copilot/request/tool-call-state' -import { handleBillingLimitResponse } from '@/lib/copilot/request/tools/billing' -import { executeToolAndReport } from '@/lib/copilot/request/tools/executor' -import type { TraceCollector } from '@/lib/copilot/request/trace' -import { RequestTraceV1SpanStatus } from '@/lib/copilot/request/trace' +} from "@/lib/copilot/request/tool-call-state"; +import { 
handleBillingLimitResponse } from "@/lib/copilot/request/tools/billing"; +import { executeToolAndReport } from "@/lib/copilot/request/tools/executor"; +import type { TraceCollector } from "@/lib/copilot/request/trace"; +import { RequestTraceV1SpanStatus } from "@/lib/copilot/request/trace"; import type { ExecutionContext, OrchestratorOptions, OrchestratorResult, StreamEvent, StreamingContext, -} from '@/lib/copilot/request/types' -import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' -import { env } from '@/lib/core/config/env' -import { generateId } from '@/lib/core/utils/uuid' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +} from "@/lib/copilot/request/types"; +import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; +import { env } from "@/lib/core/config/env"; +import { generateId } from "@/lib/core/utils/uuid"; +import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; -const logger = createLogger('CopilotLifecycle') +const logger = createLogger("CopilotLifecycle"); -const MAX_RESUME_ATTEMPTS = 3 -const RESUME_BACKOFF_MS = [250, 500, 1000] as const +const MAX_RESUME_ATTEMPTS = 3; +const RESUME_BACKOFF_MS = [250, 500, 1000] as const; export interface CopilotLifecycleOptions extends OrchestratorOptions { - userId: string - workflowId?: string - workspaceId?: string - chatId?: string - executionId?: string - runId?: string - goRoute?: string - trace?: TraceCollector - simRequestId?: string - onGoTraceId?: (goTraceId: string) => void - executionContext?: ExecutionContext + userId: string; + workflowId?: string; + workspaceId?: string; + chatId?: string; + executionId?: string; + runId?: string; + goRoute?: string; + trace?: TraceCollector; + simRequestId?: string; + otelContext?: Context; + onGoTraceId?: (goTraceId: string) => void; + executionContext?: ExecutionContext; } export async function runCopilotLifecycle( requestPayload: Record, - options: CopilotLifecycleOptions + options: 
CopilotLifecycleOptions, ): Promise { const { userId, @@ -64,10 +69,12 @@ export async function runCopilotLifecycle( chatId, executionId, runId, - goRoute = '/api/copilot', - } = options + goRoute = "/api/copilot", + } = options; const payloadMsgId = - typeof requestPayload?.messageId === 'string' ? requestPayload.messageId : generateId() + typeof requestPayload?.messageId === "string" + ? requestPayload.messageId + : generateId(); const runIdentity = await ensureHeadlessRunIdentity({ requestPayload, userId, @@ -77,9 +84,9 @@ export async function runCopilotLifecycle( executionId, runId, messageId: payloadMsgId, - }) - const resolvedExecutionId = runIdentity.executionId ?? executionId - const resolvedRunId = runIdentity.runId ?? runId + }); + const resolvedExecutionId = runIdentity.executionId ?? executionId; + const resolvedRunId = runIdentity.runId ?? runId; const lifecycleOptions: CopilotLifecycleOptions = { ...options, executionId: resolvedExecutionId, @@ -95,7 +102,7 @@ export async function runCopilotLifecycle( }, } : {}), - } + }; const execContext = lifecycleOptions.executionContext ?? @@ -107,18 +114,25 @@ export async function runCopilotLifecycle( executionId: resolvedExecutionId, runId: resolvedRunId, abortSignal: lifecycleOptions.abortSignal, - })) + })); const context = createStreamingContext({ chatId, + requestId: lifecycleOptions.simRequestId, executionId: resolvedExecutionId, runId: resolvedRunId, messageId: payloadMsgId, ...(lifecycleOptions.trace ? { trace: lifecycleOptions.trace } : {}), - }) + }); try { - await runCheckpointLoop(requestPayload, context, execContext, lifecycleOptions, goRoute) + await runCheckpointLoop( + requestPayload, + context, + execContext, + lifecycleOptions, + goRoute, + ); const result: OrchestratorResult = { success: context.errors.length === 0 && !context.wasAborted, @@ -130,21 +144,24 @@ export async function runCopilotLifecycle( errors: context.errors.length ? 
context.errors : undefined, usage: context.usage, cost: context.cost, - } - await lifecycleOptions.onComplete?.(result) - return result + }; + await lifecycleOptions.onComplete?.(result); + return result; } catch (error) { - const err = error instanceof Error ? error : new Error('Copilot orchestration failed') - logger.error('Copilot orchestration failed', { error: err.message }) - await lifecycleOptions.onError?.(err) + const err = + error instanceof Error + ? error + : new Error("Copilot orchestration failed"); + logger.error("Copilot orchestration failed", { error: err.message }); + await lifecycleOptions.onError?.(err); return { success: false, - content: '', + content: "", contentBlocks: [], toolCalls: [], chatId: context.chatId, error: err.message, - } + }; } } @@ -157,21 +174,21 @@ async function runCheckpointLoop( context: StreamingContext, execContext: ExecutionContext, options: CopilotLifecycleOptions, - initialRoute: string + initialRoute: string, ): Promise { - let route = initialRoute - let payload: Record = initialPayload - let resumeAttempt = 0 - const callerOnEvent = options.onEvent + let route = initialRoute; + let payload: Record = initialPayload; + let resumeAttempt = 0; + const callerOnEvent = options.onEvent; for (;;) { - context.streamComplete = false - const isResume = route === '/api/tools/resume' + context.streamComplete = false; + const isResume = route === "/api/tools/resume"; if (isResume && isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } const loopOptions = { @@ -183,90 +200,102 @@ async function runCheckpointLoop( options.runId ) { try { - await updateRunStatus(options.runId, 'paused_waiting_for_tool') + await updateRunStatus(options.runId, "paused_waiting_for_tool"); } catch (error) { - logger.warn('Failed to mark run as paused_waiting_for_tool', { + logger.warn("Failed to mark 
run as paused_waiting_for_tool", { runId: options.runId, error: error instanceof Error ? error.message : String(error), - }) + }); } } - await callerOnEvent?.(event) + await callerOnEvent?.(event); }, - } + }; const streamSpan = context.trace.startSpan( - isResume ? 'Sim → Go (Resume)' : 'Sim → Go Stream', - isResume ? 'lifecycle.resume' : 'sim.stream', + isResume ? "Sim → Go (Resume)" : "Sim → Go Stream", + isResume ? "lifecycle.resume" : "sim.stream", { route, isResume, ...(isResume ? { attempt: resumeAttempt } : {}), - } - ) - context.trace.setActiveSpan(streamSpan) + }, + ); + context.trace.setActiveSpan(streamSpan); - logger.info('Starting stream loop', { + logger.info("Starting stream loop", { route, isResume, resumeAttempt, pendingToolPromises: context.pendingToolPromises.size, toolCallCount: context.toolCalls.size, hasCheckpoint: !!context.awaitingAsyncContinuation, - }) + }); try { await runStreamLoop( `${SIM_AGENT_API_URL}${route}`, { - method: 'POST', + method: "POST", headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), - 'X-Client-Version': SIM_AGENT_VERSION, - ...(options.simRequestId ? { 'X-Sim-Request-ID': options.simRequestId } : {}), + "Content-Type": "application/json", + ...(env.COPILOT_API_KEY + ? { "x-api-key": env.COPILOT_API_KEY } + : {}), + "X-Client-Version": SIM_AGENT_VERSION, + ...(options.simRequestId + ? { "X-Sim-Request-ID": options.simRequestId } + : {}), }, body: JSON.stringify(payload), }, context, execContext, - loopOptions - ) + loopOptions, + ); const streamStatus = isAborted(options, context) ? RequestTraceV1SpanStatus.cancelled : context.errors.length > 0 ? 
RequestTraceV1SpanStatus.error - : RequestTraceV1SpanStatus.ok - context.trace.endSpan(streamSpan, streamStatus) - context.trace.setActiveSpan(undefined) - resumeAttempt = 0 + : RequestTraceV1SpanStatus.ok; + context.trace.endSpan(streamSpan, streamStatus); + context.trace.setActiveSpan(undefined); + resumeAttempt = 0; } catch (streamError) { - context.trace.endSpan(streamSpan, RequestTraceV1SpanStatus.error) - context.trace.setActiveSpan(undefined) + context.trace.endSpan(streamSpan, RequestTraceV1SpanStatus.error); + context.trace.setActiveSpan(undefined); if (streamError instanceof BillingLimitError) { - await handleBillingLimitResponse(streamError.userId, context, execContext, options) - break + await handleBillingLimitResponse( + streamError.userId, + context, + execContext, + options, + ); + break; } if ( isResume && isRetryableStreamError(streamError) && resumeAttempt < MAX_RESUME_ATTEMPTS - 1 ) { - resumeAttempt++ - const backoff = RESUME_BACKOFF_MS[resumeAttempt - 1] ?? 1000 - logger.warn('Resume stream failed, retrying', { + resumeAttempt++; + const backoff = RESUME_BACKOFF_MS[resumeAttempt - 1] ?? 1000; + logger.warn("Resume stream failed, retrying", { attempt: resumeAttempt + 1, maxAttempts: MAX_RESUME_ATTEMPTS, backoffMs: backoff, - error: streamError instanceof Error ? streamError.message : String(streamError), - }) - await sleepWithAbort(backoff, options.abortSignal) - continue + error: + streamError instanceof Error + ? 
streamError.message + : String(streamError), + }); + await sleepWithAbort(backoff, options.abortSignal); + continue; } - throw streamError + throw streamError; } - logger.info('Stream loop completed', { + logger.info("Stream loop completed", { route, isResume, isAborted: isAborted(options, context), @@ -275,131 +304,142 @@ async function runCheckpointLoop( pendingToolPromises: context.pendingToolPromises.size, streamComplete: context.streamComplete, toolCallCount: context.toolCalls.size, - }) + }); if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } - const continuation = context.awaitingAsyncContinuation - if (!continuation) break + const continuation = context.awaitingAsyncContinuation; + if (!continuation) break; if (context.pendingToolPromises.size > 0) { - const waitSpan = context.trace.startSpan('Wait for Tools', 'lifecycle.wait_tools', { - checkpointId: continuation.checkpointId, - pendingCount: context.pendingToolPromises.size, - }) - logger.info('Waiting for in-flight tool executions before resume', { + const waitSpan = context.trace.startSpan( + "Wait for Tools", + "lifecycle.wait_tools", + { + checkpointId: continuation.checkpointId, + pendingCount: context.pendingToolPromises.size, + }, + ); + logger.info("Waiting for in-flight tool executions before resume", { checkpointId: continuation.checkpointId, pendingCount: context.pendingToolPromises.size, - }) - await Promise.allSettled(context.pendingToolPromises.values()) - context.trace.endSpan(waitSpan) + }); + await Promise.allSettled(context.pendingToolPromises.values()); + context.trace.endSpan(waitSpan); } if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } - const undispatchedToolIds = 
continuation.pendingToolCallIds.filter((toolCallId) => { - const tool = context.toolCalls.get(toolCallId) - return ( - !!tool && - !tool.result && - !tool.error && - !context.pendingToolPromises.has(toolCallId) && - tool.status !== 'executing' - ) - }) + const undispatchedToolIds = continuation.pendingToolCallIds.filter( + (toolCallId) => { + const tool = context.toolCalls.get(toolCallId); + return ( + !!tool && + !tool.result && + !tool.error && + !context.pendingToolPromises.has(toolCallId) && + tool.status !== "executing" + ); + }, + ); if (undispatchedToolIds.length > 0) { - logger.warn('Checkpointed tools were never dispatched; executing before resume', { - checkpointId: continuation.checkpointId, - toolCallIds: undispatchedToolIds, - }) + logger.warn( + "Checkpointed tools were never dispatched; executing before resume", + { + checkpointId: continuation.checkpointId, + toolCallIds: undispatchedToolIds, + }, + ); await Promise.allSettled( undispatchedToolIds.map((toolCallId) => - executeToolAndReport(toolCallId, context, execContext, options) - ) - ) + executeToolAndReport(toolCallId, context, execContext, options), + ), + ); } if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } const results: Array<{ - callId: string - name: string - data: unknown - success: boolean - }> = [] + callId: string; + name: string; + data: unknown; + success: boolean; + }> = []; for (const toolCallId of continuation.pendingToolCallIds) { if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } - const tool = context.toolCalls.get(toolCallId) + const tool = context.toolCalls.get(toolCallId); if (!tool || !tool.result) { - logger.error('Missing tool result for pending tool 
call', { + logger.error("Missing tool result for pending tool call", { toolCallId, checkpointId: continuation.checkpointId, hasToolEntry: !!tool, toolName: tool?.name, toolStatus: tool?.status, hasPendingPromise: context.pendingToolPromises.has(toolCallId), - }) - throw new Error(`Cannot resume: missing result for pending tool call ${toolCallId}`) + }); + throw new Error( + `Cannot resume: missing result for pending tool call ${toolCallId}`, + ); } results.push({ callId: toolCallId, - name: tool.name || '', + name: tool.name || "", data: getToolCallTerminalData(tool), success: requireToolCallStateResult(tool).success, - }) + }); } if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } - logger.info('Resuming with tool results', { + logger.info("Resuming with tool results", { checkpointId: continuation.checkpointId, runId: continuation.runId, toolCount: results.length, pendingToolCallIds: continuation.pendingToolCallIds, frameCount: continuation.frames?.length ?? 
0, - }) + }); - context.awaitingAsyncContinuation = undefined - route = '/api/tools/resume' + context.awaitingAsyncContinuation = undefined; + route = "/api/tools/resume"; payload = { streamId: context.messageId, checkpointId: continuation.checkpointId, results, - } + }; if (isAborted(options, context)) { - cancelPendingTools(context) - context.awaitingAsyncContinuation = undefined - break + cancelPendingTools(context); + context.awaitingAsyncContinuation = undefined; + break; } - logger.info('Prepared resume request payload', { + logger.info("Prepared resume request payload", { route, streamId: context.messageId, checkpointId: continuation.checkpointId, resultCount: results.length, - }) + }); } } @@ -410,64 +450,80 @@ async function runCheckpointLoop( async function buildExecutionContext( requestPayload: Record, params: { - userId: string - workflowId?: string - workspaceId?: string - chatId?: string - executionId?: string - runId?: string - abortSignal?: AbortSignal - } + userId: string; + workflowId?: string; + workspaceId?: string; + chatId?: string; + executionId?: string; + runId?: string; + abortSignal?: AbortSignal; + }, ): Promise { - const { userId, workflowId, workspaceId, chatId, executionId, runId, abortSignal } = params + const { + userId, + workflowId, + workspaceId, + chatId, + executionId, + runId, + abortSignal, + } = params; const userTimezone = - typeof requestPayload?.userTimezone === 'string' ? requestPayload.userTimezone : undefined - const requestMode = typeof requestPayload?.mode === 'string' ? requestPayload.mode : undefined + typeof requestPayload?.userTimezone === "string" + ? requestPayload.userTimezone + : undefined; + const requestMode = + typeof requestPayload?.mode === "string" ? 
requestPayload.mode : undefined; - let execContext: ExecutionContext + let execContext: ExecutionContext; if (workflowId) { - execContext = await prepareExecutionContext(userId, workflowId, chatId) + execContext = await prepareExecutionContext(userId, workflowId, chatId); } else { - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + const decryptedEnvVars = await getEffectiveDecryptedEnv( + userId, + workspaceId, + ); execContext = { userId, - workflowId: '', + workflowId: "", workspaceId, chatId, decryptedEnvVars, - } + }; } - if (userTimezone) execContext.userTimezone = userTimezone - execContext.copilotToolExecution = true - if (requestMode) execContext.requestMode = requestMode + if (userTimezone) execContext.userTimezone = userTimezone; + execContext.copilotToolExecution = true; + if (requestMode) execContext.requestMode = requestMode; execContext.messageId = - typeof requestPayload?.messageId === 'string' ? requestPayload.messageId : undefined - execContext.executionId = executionId - execContext.runId = runId - execContext.abortSignal = abortSignal - return execContext + typeof requestPayload?.messageId === "string" + ? 
requestPayload.messageId + : undefined; + execContext.executionId = executionId; + execContext.runId = runId; + execContext.abortSignal = abortSignal; + return execContext; } async function ensureHeadlessRunIdentity(input: { - requestPayload: Record - userId: string - workflowId?: string - workspaceId?: string - chatId?: string - executionId?: string - runId?: string - messageId: string + requestPayload: Record; + userId: string; + workflowId?: string; + workspaceId?: string; + chatId?: string; + executionId?: string; + runId?: string; + messageId: string; }): Promise<{ executionId?: string; runId?: string }> { if (!input.chatId || input.executionId || input.runId) { return { executionId: input.executionId, runId: input.runId, - } + }; } - const executionId = generateId() - const runId = generateId() + const executionId = generateId(); + const runId = generateId(); try { await createRunSegment({ @@ -478,21 +534,26 @@ async function ensureHeadlessRunIdentity(input: { workflowId: input.workflowId, workspaceId: input.workspaceId, streamId: input.messageId, - model: typeof input.requestPayload?.model === 'string' ? input.requestPayload.model : null, + model: + typeof input.requestPayload?.model === "string" + ? input.requestPayload.model + : null, provider: - typeof input.requestPayload?.provider === 'string' ? input.requestPayload.provider : null, + typeof input.requestPayload?.provider === "string" + ? input.requestPayload.provider + : null, requestContext: { - source: 'headless_lifecycle', + source: "headless_lifecycle", }, - }) - return { executionId, runId } + }); + return { executionId, runId }; } catch (error) { - logger.warn('Failed to create headless run identity', { + logger.warn("Failed to create headless run identity", { chatId: input.chatId, messageId: input.messageId, error: error instanceof Error ? 
error.message : String(error), - }) - return {} + }); + return {}; } } @@ -500,51 +561,54 @@ async function ensureHeadlessRunIdentity(input: { // Helpers // --------------------------------------------------------------------------- -function isAborted(options: CopilotLifecycleOptions, context: StreamingContext): boolean { - return !!(options.abortSignal?.aborted || context.wasAborted) +function isAborted( + options: CopilotLifecycleOptions, + context: StreamingContext, +): boolean { + return !!(options.abortSignal?.aborted || context.wasAborted); } function cancelPendingTools(context: StreamingContext): void { for (const [, toolCall] of context.toolCalls) { - if (toolCall.status === 'pending' || toolCall.status === 'executing') { + if (toolCall.status === "pending" || toolCall.status === "executing") { setTerminalToolCallState(toolCall, { status: MothershipStreamV1ToolOutcome.cancelled, - error: 'Stopped by user', - }) + error: "Stopped by user", + }); } } } function isRetryableStreamError(error: unknown): boolean { - if (error instanceof DOMException && error.name === 'AbortError') { - return false + if (error instanceof DOMException && error.name === "AbortError") { + return false; } if (error instanceof CopilotBackendError) { - return error.status !== undefined && error.status >= 500 + return error.status !== undefined && error.status >= 500; } if (error instanceof TypeError) { - return true + return true; } - return false + return false; } function sleepWithAbort(ms: number, abortSignal?: AbortSignal): Promise { if (!abortSignal) { - return new Promise((resolve) => setTimeout(resolve, ms)) + return new Promise((resolve) => setTimeout(resolve, ms)); } if (abortSignal.aborted) { - return Promise.resolve() + return Promise.resolve(); } return new Promise((resolve) => { const timeoutId = setTimeout(() => { - abortSignal.removeEventListener('abort', onAbort) - resolve() - }, ms) + abortSignal.removeEventListener("abort", onAbort); + resolve(); + }, ms); const 
onAbort = () => { - clearTimeout(timeoutId) - abortSignal.removeEventListener('abort', onAbort) - resolve() - } - abortSignal.addEventListener('abort', onAbort, { once: true }) - }) + clearTimeout(timeoutId); + abortSignal.removeEventListener("abort", onAbort); + resolve(); + }; + abortSignal.addEventListener("abort", onAbort, { once: true }); + }); } diff --git a/apps/sim/lib/copilot/request/lifecycle/start.test.ts b/apps/sim/lib/copilot/request/lifecycle/start.test.ts index 9c5ee7adacf..a3a7f19d033 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.test.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.test.ts @@ -2,8 +2,11 @@ * @vitest-environment node */ -import { beforeEach, describe, expect, it, vi } from 'vitest' -import { MothershipStreamV1EventType } from '@/lib/copilot/generated/mothership-stream-v1' +import { propagation, trace } from "@opentelemetry/api"; +import { W3CTraceContextPropagator } from "@opentelemetry/core"; +import { BasicTracerProvider } from "@opentelemetry/sdk-trace-base"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { MothershipStreamV1EventType } from "@/lib/copilot/generated/mothership-stream-v1"; const { runCopilotLifecycle, @@ -31,20 +34,20 @@ const { cleanupAbortMarker: vi.fn(), hasAbortMarker: vi.fn(), releasePendingChatStream: vi.fn(), -})) +})); -vi.mock('@/lib/copilot/request/lifecycle/run', () => ({ +vi.mock("@/lib/copilot/request/lifecycle/run", () => ({ runCopilotLifecycle, -})) +})); -vi.mock('@/lib/copilot/async-runs/repository', () => ({ +vi.mock("@/lib/copilot/async-runs/repository", () => ({ createRunSegment, updateRunStatus, -})) +})); -let mockPublisherController: ReadableStreamDefaultController | null = null +let mockPublisherController: ReadableStreamDefaultController | null = null; -vi.mock('@/lib/copilot/request/session', () => ({ +vi.mock("@/lib/copilot/request/session", () => ({ resetBuffer, clearFilePreviewSessions, scheduleBufferCleanup, @@ -58,37 +61,43 @@ 
vi.mock('@/lib/copilot/request/session', () => ({ unregisterActiveStream: vi.fn(), startAbortPoller: vi.fn().mockReturnValue(setInterval(() => {}, 999999)), SSE_RESPONSE_HEADERS: {}, - StreamWriter: vi.fn().mockImplementation(() => ({ - attach: vi.fn().mockImplementation((ctrl: ReadableStreamDefaultController) => { - mockPublisherController = ctrl - }), - startKeepalive: vi.fn(), - stopKeepalive: vi.fn(), - flush: vi.fn(), - close: vi.fn().mockImplementation(() => { - try { - mockPublisherController?.close() - } catch { - // already closed - } - }), - markDisconnected: vi.fn(), - publish: vi.fn().mockImplementation(async (event: Record) => { - appendEvent(event) - }), - get clientDisconnected() { - return false - }, - get sawComplete() { - return false - }, - })), -})) -vi.mock('@/lib/copilot/request/session/sse', () => ({ + StreamWriter: vi.fn().mockImplementation(function () { + return { + attach: vi + .fn() + .mockImplementation((ctrl: ReadableStreamDefaultController) => { + mockPublisherController = ctrl; + }), + startKeepalive: vi.fn(), + stopKeepalive: vi.fn(), + flush: vi.fn(), + close: vi.fn().mockImplementation(() => { + try { + mockPublisherController?.close(); + } catch { + // already closed + } + }), + markDisconnected: vi.fn(), + publish: vi + .fn() + .mockImplementation(async (event: Record) => { + appendEvent(event); + }), + get clientDisconnected() { + return false; + }, + get sawComplete() { + return false; + }, + }; + }), +})); +vi.mock("@/lib/copilot/request/session/sse", () => ({ SSE_RESPONSE_HEADERS: {}, -})) +})); -vi.mock('@sim/db', () => ({ +vi.mock("@sim/db", () => ({ db: { update: vi.fn(() => ({ set: vi.fn(() => ({ @@ -96,98 +105,154 @@ vi.mock('@sim/db', () => ({ })), })), }, -})) +})); -vi.mock('@/lib/copilot/tasks', () => ({ +vi.mock("@/lib/copilot/tasks", () => ({ taskPubSub: null, -})) +})); -import { createSSEStream } from './start' +import { createSSEStream } from "./start"; async function drainStream(stream: ReadableStream) { - 
const reader = stream.getReader() + const reader = stream.getReader(); while (true) { - const { done } = await reader.read() - if (done) break + const { done } = await reader.read(); + if (done) break; } } -describe('createSSEStream terminal error handling', () => { +describe("createSSEStream terminal error handling", () => { beforeEach(() => { - vi.clearAllMocks() - resetBuffer.mockResolvedValue(undefined) - clearFilePreviewSessions.mockResolvedValue(undefined) - scheduleBufferCleanup.mockResolvedValue(undefined) - scheduleFilePreviewSessionCleanup.mockResolvedValue(undefined) + vi.clearAllMocks(); + trace.setGlobalTracerProvider(new BasicTracerProvider()); + propagation.setGlobalPropagator(new W3CTraceContextPropagator()); + vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue( + new Response(JSON.stringify({ title: "Test title" }), { + status: 200, + headers: { + "Content-Type": "application/json", + }, + }), + ), + ); + resetBuffer.mockResolvedValue(undefined); + clearFilePreviewSessions.mockResolvedValue(undefined); + scheduleBufferCleanup.mockResolvedValue(undefined); + scheduleFilePreviewSessionCleanup.mockResolvedValue(undefined); allocateCursor - .mockResolvedValueOnce({ seq: 1, cursor: '1' }) - .mockResolvedValueOnce({ seq: 2, cursor: '2' }) - .mockResolvedValueOnce({ seq: 3, cursor: '3' }) - appendEvent.mockImplementation(async (event: unknown) => event) - cleanupAbortMarker.mockResolvedValue(undefined) - hasAbortMarker.mockResolvedValue(false) - releasePendingChatStream.mockResolvedValue(undefined) - createRunSegment.mockResolvedValue(null) - updateRunStatus.mockResolvedValue(null) - }) - - it('writes a terminal error event before close when orchestration returns success=false', async () => { + .mockResolvedValueOnce({ seq: 1, cursor: "1" }) + .mockResolvedValueOnce({ seq: 2, cursor: "2" }) + .mockResolvedValueOnce({ seq: 3, cursor: "3" }); + appendEvent.mockImplementation(async (event: unknown) => event); + 
cleanupAbortMarker.mockResolvedValue(undefined); + hasAbortMarker.mockResolvedValue(false); + releasePendingChatStream.mockResolvedValue(undefined); + createRunSegment.mockResolvedValue(null); + updateRunStatus.mockResolvedValue(null); + }); + + afterEach(() => { + vi.unstubAllGlobals(); + }); + + it("writes a terminal error event before close when orchestration returns success=false", async () => { runCopilotLifecycle.mockResolvedValue({ success: false, - error: 'resume failed', - content: '', + error: "resume failed", + content: "", contentBlocks: [], toolCalls: [], - }) + }); const stream = createSSEStream({ - requestPayload: { message: 'hello' }, - userId: 'user-1', - streamId: 'stream-1', - executionId: 'exec-1', - runId: 'run-1', + requestPayload: { message: "hello" }, + userId: "user-1", + streamId: "stream-1", + executionId: "exec-1", + runId: "run-1", currentChat: null, isNewChat: false, - message: 'hello', - titleModel: 'gpt-5.4', - requestId: 'req-1', + message: "hello", + titleModel: "gpt-5.4", + requestId: "req-1", orchestrateOptions: {}, - }) + }); - await drainStream(stream) + await drainStream(stream); expect(appendEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.error, - }) - ) - expect(scheduleBufferCleanup).toHaveBeenCalledWith('stream-1') - }) + }), + ); + expect(scheduleBufferCleanup).toHaveBeenCalledWith("stream-1"); + }); - it('writes the thrown terminal error event before close for replay durability', async () => { - runCopilotLifecycle.mockRejectedValue(new Error('kaboom')) + it("writes the thrown terminal error event before close for replay durability", async () => { + runCopilotLifecycle.mockRejectedValue(new Error("kaboom")); const stream = createSSEStream({ - requestPayload: { message: 'hello' }, - userId: 'user-1', - streamId: 'stream-1', - executionId: 'exec-1', - runId: 'run-1', + requestPayload: { message: "hello" }, + userId: "user-1", + streamId: "stream-1", + executionId: "exec-1", + runId: 
"run-1", currentChat: null, isNewChat: false, - message: 'hello', - titleModel: 'gpt-5.4', - requestId: 'req-1', + message: "hello", + titleModel: "gpt-5.4", + requestId: "req-1", orchestrateOptions: {}, - }) + }); - await drainStream(stream) + await drainStream(stream); expect(appendEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.error, - }) - ) - expect(scheduleBufferCleanup).toHaveBeenCalledWith('stream-1') - }) -}) + }), + ); + expect(scheduleBufferCleanup).toHaveBeenCalledWith("stream-1"); + }); + + it("passes an OTel context into the streaming lifecycle", async () => { + let lifecycleTraceparent = ""; + runCopilotLifecycle.mockImplementation(async (_payload, options) => { + const { traceHeaders } = + await import("@/lib/copilot/request/go/propagation"); + lifecycleTraceparent = + traceHeaders({}, options.otelContext).traceparent ?? ""; + return { + success: true, + content: "OK", + contentBlocks: [], + toolCalls: [], + }; + }); + + const stream = createSSEStream({ + requestPayload: { message: "hello" }, + userId: "user-1", + streamId: "stream-1", + executionId: "exec-1", + runId: "run-1", + currentChat: null, + isNewChat: false, + message: "hello", + titleModel: "gpt-5.4", + requestId: "req-otel", + orchestrateOptions: { + goRoute: "/api/mothership", + workflowId: "workflow-1", + }, + }); + + await drainStream(stream); + + expect(lifecycleTraceparent).toMatch( + /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, + ); + }); +}); diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index caf6fb4df87..b4609d15d19 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -1,17 +1,18 @@ -import { db } from '@sim/db' -import { copilotChats } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { eq } from 'drizzle-orm' -import { createRunSegment } from '@/lib/copilot/async-runs/repository' 
-import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import type { Context } from "@opentelemetry/api"; +import { db } from "@sim/db"; +import { copilotChats } from "@sim/db/schema"; +import { createLogger } from "@sim/logger"; +import { eq } from "drizzle-orm"; +import { createRunSegment } from "@/lib/copilot/async-runs/repository"; +import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; import { MothershipStreamV1EventType, MothershipStreamV1SessionKind, -} from '@/lib/copilot/generated/mothership-stream-v1' -import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' -import { finalizeStream } from '@/lib/copilot/request/lifecycle/finalize' -import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' -import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; +import { finalizeStream } from "@/lib/copilot/request/lifecycle/finalize"; +import type { CopilotLifecycleOptions } from "@/lib/copilot/request/lifecycle/run"; +import { runCopilotLifecycle } from "@/lib/copilot/request/lifecycle/run"; import { cleanupAbortMarker, clearFilePreviewSessions, @@ -23,38 +24,41 @@ import { scheduleFilePreviewSessionCleanup, startAbortPoller, unregisterActiveStream, -} from '@/lib/copilot/request/session' -import { SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/session/sse' -import { reportTrace, TraceCollector } from '@/lib/copilot/request/trace' -import { taskPubSub } from '@/lib/copilot/tasks' -import { env } from '@/lib/core/config/env' +} from "@/lib/copilot/request/session"; +import { SSE_RESPONSE_HEADERS } from "@/lib/copilot/request/session/sse"; +import { withCopilotOtelContext } from "@/lib/copilot/request/otel"; +import { reportTrace, TraceCollector } from "@/lib/copilot/request/trace"; +import { taskPubSub } from "@/lib/copilot/tasks"; +import { env } 
from "@/lib/core/config/env"; -export { SSE_RESPONSE_HEADERS } +export { SSE_RESPONSE_HEADERS }; -const logger = createLogger('CopilotChatStreaming') +const logger = createLogger("CopilotChatStreaming"); type CurrentChatSummary = { - title?: string | null -} | null + title?: string | null; +} | null; export interface StreamingOrchestrationParams { - requestPayload: Record - userId: string - streamId: string - executionId: string - runId: string - chatId?: string - currentChat: CurrentChatSummary - isNewChat: boolean - message: string - titleModel: string - titleProvider?: string - requestId: string - workspaceId?: string - orchestrateOptions: Omit + requestPayload: Record; + userId: string; + streamId: string; + executionId: string; + runId: string; + chatId?: string; + currentChat: CurrentChatSummary; + isNewChat: boolean; + message: string; + titleModel: string; + titleProvider?: string; + requestId: string; + workspaceId?: string; + orchestrateOptions: Omit; } -export function createSSEStream(params: StreamingOrchestrationParams): ReadableStream { +export function createSSEStream( + params: StreamingOrchestrationParams, +): ReadableStream { const { requestPayload, userId, @@ -70,166 +74,212 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS requestId, workspaceId, orchestrateOptions, - } = params + } = params; - const abortController = new AbortController() - registerActiveStream(streamId, abortController) + const abortController = new AbortController(); + registerActiveStream(streamId, abortController); - const publisher = new StreamWriter({ streamId, chatId, requestId }) + const publisher = new StreamWriter({ streamId, chatId, requestId }); - const collector = new TraceCollector() + const collector = new TraceCollector(); return new ReadableStream({ async start(controller) { - publisher.attach(controller) + publisher.attach(controller); - const requestSpan = collector.startSpan('Mothership Request', 'request', { - streamId, - 
chatId, - runId, - }) - let outcome: 'success' | 'error' | 'cancelled' = 'error' - let lifecycleResult: - | { - usage?: { prompt: number; completion: number } - cost?: { input: number; output: number; total: number } - } - | undefined - - await Promise.all([resetBuffer(streamId), clearFilePreviewSessions(streamId)]) - - if (chatId) { - createRunSegment({ - id: runId, - executionId, + await withCopilotOtelContext( + { + requestId, + route: orchestrateOptions.goRoute, chatId, - userId, - workflowId: (requestPayload.workflowId as string | undefined) || null, - workspaceId, + workflowId: orchestrateOptions.workflowId, + executionId, + runId, streamId, - model: (requestPayload.model as string | undefined) || null, - provider: (requestPayload.provider as string | undefined) || null, - requestContext: { requestId }, - }).catch((error) => { - logger.warn(`[${requestId}] Failed to create copilot run segment`, { - error: error instanceof Error ? error.message : String(error), - }) - }) - } + transport: "stream", + }, + async (otelContext) => { + const requestSpan = collector.startSpan( + "Mothership Request", + "request", + { + streamId, + chatId, + runId, + }, + ); + let outcome: "success" | "error" | "cancelled" = "error"; + let lifecycleResult: + | { + usage?: { prompt: number; completion: number }; + cost?: { input: number; output: number; total: number }; + } + | undefined; - const abortPoller = startAbortPoller(streamId, abortController, { requestId }) - publisher.startKeepalive() + await Promise.all([ + resetBuffer(streamId), + clearFilePreviewSessions(streamId), + ]); - if (chatId) { - publisher.publish({ - type: MothershipStreamV1EventType.session, - payload: { - kind: MothershipStreamV1SessionKind.chat, - chatId, - }, - }) - } + if (chatId) { + createRunSegment({ + id: runId, + executionId, + chatId, + userId, + workflowId: + (requestPayload.workflowId as string | undefined) || null, + workspaceId, + streamId, + model: (requestPayload.model as string | undefined) 
|| null, + provider: (requestPayload.provider as string | undefined) || null, + requestContext: { requestId }, + }).catch((error) => { + logger.warn( + `[${requestId}] Failed to create copilot run segment`, + { + error: error instanceof Error ? error.message : String(error), + }, + ); + }); + } - fireTitleGeneration({ - chatId, - currentChat, - isNewChat, - message, - titleModel, - titleProvider, - workspaceId, - requestId, - publisher, - }) + const abortPoller = startAbortPoller(streamId, abortController, { + requestId, + }); + publisher.startKeepalive(); - try { - const result = await runCopilotLifecycle(requestPayload, { - ...orchestrateOptions, - executionId, - runId, - trace: collector, - simRequestId: requestId, - abortSignal: abortController.signal, - onEvent: async (event) => { - await publisher.publish(event) - }, - }) + if (chatId) { + publisher.publish({ + type: MothershipStreamV1EventType.session, + payload: { + kind: MothershipStreamV1SessionKind.chat, + chatId, + }, + }); + } - lifecycleResult = result - outcome = abortController.signal.aborted - ? RequestTraceV1Outcome.cancelled - : result.success - ? RequestTraceV1Outcome.success - : RequestTraceV1Outcome.error - await finalizeStream(result, publisher, runId, abortController.signal.aborted, requestId) - } catch (error) { - outcome = abortController.signal.aborted - ? RequestTraceV1Outcome.cancelled - : RequestTraceV1Outcome.error - if (publisher.clientDisconnected) { - logger.info(`[${requestId}] Stream errored after client disconnect`, { - error: error instanceof Error ? 
error.message : 'Stream error', - }) - } - logger.error(`[${requestId}] Unexpected orchestration error:`, error) + fireTitleGeneration({ + chatId, + currentChat, + isNewChat, + message, + titleModel, + titleProvider, + workspaceId, + requestId, + publisher, + otelContext, + }); - const syntheticResult = { - success: false as const, - content: '', - contentBlocks: [], - toolCalls: [], - error: 'An unexpected error occurred while processing the response.', - } - await finalizeStream( - syntheticResult, - publisher, - runId, - abortController.signal.aborted, - requestId - ) - } finally { - collector.endSpan( - requestSpan, - outcome === RequestTraceV1Outcome.success - ? 'ok' - : outcome === RequestTraceV1Outcome.cancelled - ? 'cancelled' - : 'error' - ) + try { + const result = await runCopilotLifecycle(requestPayload, { + ...orchestrateOptions, + executionId, + runId, + trace: collector, + simRequestId: requestId, + otelContext, + abortSignal: abortController.signal, + onEvent: async (event) => { + await publisher.publish(event); + }, + }); - clearInterval(abortPoller) - try { - await publisher.close() - } catch (error) { - logger.warn(`[${requestId}] Failed to flush stream persistence during close`, { - error: error instanceof Error ? error.message : String(error), - }) - } - unregisterActiveStream(streamId) - if (chatId) { - await releasePendingChatStream(chatId, streamId) - } - await scheduleBufferCleanup(streamId) - await scheduleFilePreviewSessionCleanup(streamId) - await cleanupAbortMarker(streamId) + lifecycleResult = result; + outcome = abortController.signal.aborted + ? RequestTraceV1Outcome.cancelled + : result.success + ? RequestTraceV1Outcome.success + : RequestTraceV1Outcome.error; + await finalizeStream( + result, + publisher, + runId, + abortController.signal.aborted, + requestId, + ); + } catch (error) { + outcome = abortController.signal.aborted + ? 
RequestTraceV1Outcome.cancelled + : RequestTraceV1Outcome.error; + if (publisher.clientDisconnected) { + logger.info( + `[${requestId}] Stream errored after client disconnect`, + { + error: + error instanceof Error ? error.message : "Stream error", + }, + ); + } + logger.error( + `[${requestId}] Unexpected orchestration error:`, + error, + ); - const trace = collector.build({ - outcome: outcome as 'success' | 'error' | 'cancelled', - simRequestId: requestId, - streamId, - chatId, - runId, - executionId, - usage: lifecycleResult?.usage, - cost: lifecycleResult?.cost, - }) - reportTrace(trace).catch(() => {}) - } + const syntheticResult = { + success: false as const, + content: "", + contentBlocks: [], + toolCalls: [], + error: + "An unexpected error occurred while processing the response.", + }; + await finalizeStream( + syntheticResult, + publisher, + runId, + abortController.signal.aborted, + requestId, + ); + } finally { + collector.endSpan( + requestSpan, + outcome === RequestTraceV1Outcome.success + ? "ok" + : outcome === RequestTraceV1Outcome.cancelled + ? "cancelled" + : "error", + ); + + clearInterval(abortPoller); + try { + await publisher.close(); + } catch (error) { + logger.warn( + `[${requestId}] Failed to flush stream persistence during close`, + { + error: error instanceof Error ? 
error.message : String(error), + }, + ); + } + unregisterActiveStream(streamId); + if (chatId) { + await releasePendingChatStream(chatId, streamId); + } + await scheduleBufferCleanup(streamId); + await scheduleFilePreviewSessionCleanup(streamId); + await cleanupAbortMarker(streamId); + + const trace = collector.build({ + outcome: outcome as "success" | "error" | "cancelled", + simRequestId: requestId, + streamId, + chatId, + runId, + executionId, + usage: lifecycleResult?.usage, + cost: lifecycleResult?.cost, + }); + reportTrace(trace, otelContext).catch(() => {}); + } + }, + ); }, cancel() { - publisher.markDisconnected() + publisher.markDisconnected(); }, - }) + }); } // --------------------------------------------------------------------------- @@ -237,15 +287,16 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS // --------------------------------------------------------------------------- function fireTitleGeneration(params: { - chatId?: string - currentChat: CurrentChatSummary - isNewChat: boolean - message: string - titleModel: string - titleProvider?: string - workspaceId?: string - requestId: string - publisher: StreamWriter + chatId?: string; + currentChat: CurrentChatSummary; + isNewChat: boolean; + message: string; + titleModel: string; + titleProvider?: string; + workspaceId?: string; + requestId: string; + publisher: StreamWriter; + otelContext?: Context; }): void { const { chatId, @@ -257,24 +308,37 @@ function fireTitleGeneration(params: { workspaceId, requestId, publisher, - } = params - if (!chatId || currentChat?.title || !isNewChat) return + otelContext, + } = params; + if (!chatId || currentChat?.title || !isNewChat) return; - requestChatTitle({ message, model: titleModel, provider: titleProvider }) + requestChatTitle({ + message, + model: titleModel, + provider: titleProvider, + otelContext, + }) .then(async (title) => { - if (!title) return - await db.update(copilotChats).set({ title 
}).where(eq(copilotChats.id, chatId)) + if (!title) return; + await db + .update(copilotChats) + .set({ title }) + .where(eq(copilotChats.id, chatId)); await publisher.publish({ type: MothershipStreamV1EventType.session, payload: { kind: MothershipStreamV1SessionKind.title, title }, - }) + }); if (workspaceId) { - taskPubSub?.publishStatusChanged({ workspaceId, chatId, type: 'renamed' }) + taskPubSub?.publishStatusChanged({ + workspaceId, + chatId, + type: "renamed", + }); } }) .catch((error) => { - logger.error(`[${requestId}] Title generation failed:`, error) - }) + logger.error(`[${requestId}] Title generation failed:`, error); + }); } // --------------------------------------------------------------------------- @@ -282,38 +346,57 @@ function fireTitleGeneration(params: { // --------------------------------------------------------------------------- export async function requestChatTitle(params: { - message: string - model: string - provider?: string + message: string; + model: string; + provider?: string; + otelContext?: Context; }): Promise { - const { message, model, provider } = params - if (!message || !model) return null + const { message, model, provider, otelContext } = params; + if (!message || !model) return null; - const headers: Record = { 'Content-Type': 'application/json' } + const headers: Record = { + "Content-Type": "application/json", + }; if (env.COPILOT_API_KEY) { - headers['x-api-key'] = env.COPILOT_API_KEY + headers["x-api-key"] = env.COPILOT_API_KEY; } try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, { - method: 'POST', - headers, - body: JSON.stringify({ message, model, ...(provider ? { provider } : {}) }), - }) + const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); + const response = await fetchGo( + `${SIM_AGENT_API_URL}/api/generate-chat-title`, + { + method: "POST", + headers, + body: JSON.stringify({ + message, + model, + ...(provider ? 
{ provider } : {}), + }), + otelContext, + spanName: "sim → go /api/generate-chat-title", + operation: "generate_chat_title", + attributes: { + "gen_ai.request.model": model, + ...(provider ? { "gen_ai.system": provider } : {}), + }, + }, + ); - const payload = await response.json().catch(() => ({})) + const payload = await response.json().catch(() => ({})); if (!response.ok) { - logger.warn('Failed to generate chat title via copilot backend', { + logger.warn("Failed to generate chat title via copilot backend", { status: response.status, error: payload, - }) - return null + }); + return null; } - const title = typeof payload?.title === 'string' ? payload.title.trim() : '' - return title || null + const title = + typeof payload?.title === "string" ? payload.title.trim() : ""; + return title || null; } catch (error) { - logger.error('Error generating chat title:', error) - return null + logger.error("Error generating chat title:", error); + return null; } } diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts new file mode 100644 index 00000000000..ec04c6630df --- /dev/null +++ b/apps/sim/lib/copilot/request/otel.ts @@ -0,0 +1,166 @@ +import { randomBytes } from "crypto"; +import { + context, + SpanStatusCode, + TraceFlags, + trace, + type Context, + type Span, + type SpanContext, +} from "@opentelemetry/api"; + +/** + * Resolve the tracer lazily on every call. With Next.js 16 + Turbopack dev + * bundling, a module-level `trace.getTracer(...)` call can be evaluated + * before the NodeSDK in `instrumentation-node.ts` installs the real + * TracerProvider. If that happens, the cached tracer is the NoOpTracer, + * which produces NoOpSpans whose `.end()` never reaches any processor — + * silently disabling all OTel on the Sim side. Calling `trace.getTracer` + * per request ensures we always pick up the currently-registered provider. 
+ */ +export function getCopilotTracer() { + return trace.getTracer("sim-ai-platform", "1.0.0"); +} + +function getTracer() { + return getCopilotTracer(); +} + +/** + * Run `fn` inside an OTel `tool.execute` span. This mirrors the internal + * TraceCollector span that already wraps Sim-side tool work, so the + * external OTLP trace reflects the actual tool execution (the Go side's + * `tool.execute` is just the async enqueue and stays ~0ms). + */ +export async function withCopilotToolSpan( + input: { + toolName: string; + toolCallId: string; + runId?: string; + chatId?: string; + argsBytes?: number; + argsPreview?: string; + }, + fn: (span: Span) => Promise, +): Promise { + const tracer = getTracer(); + return tracer.startActiveSpan( + `tool.execute ${input.toolName}`, + { + attributes: { + "tool.name": input.toolName, + "tool.call_id": input.toolCallId, + "tool.executor": "sim", + ...(input.runId ? { "run.id": input.runId } : {}), + ...(input.chatId ? { "chat.id": input.chatId } : {}), + ...(typeof input.argsBytes === "number" + ? { "tool.args.bytes": input.argsBytes } + : {}), + ...(input.argsPreview ? { "tool.args.preview": input.argsPreview } : {}), + }, + }, + async (span) => { + try { + const result = await fn(span); + span.setStatus({ code: SpanStatusCode.OK }); + return result; + } catch (error) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? 
error : new Error(String(error)), + ); + throw error; + } finally { + span.end(); + } + }, + ); +} + +function isValidSpanContext(spanContext: SpanContext): boolean { + return ( + /^[0-9a-f]{32}$/.test(spanContext.traceId) && + spanContext.traceId !== "00000000000000000000000000000000" && + /^[0-9a-f]{16}$/.test(spanContext.spanId) && + spanContext.spanId !== "0000000000000000" + ); +} + +function createFallbackSpanContext(): SpanContext { + return { + traceId: randomBytes(16).toString("hex"), + spanId: randomBytes(8).toString("hex"), + traceFlags: TraceFlags.SAMPLED, + }; +} + +export interface CopilotOtelScope { + requestId: string; + route?: string; + chatId?: string; + workflowId?: string; + executionId?: string; + runId?: string; + streamId?: string; + transport: "headless" | "stream"; +} + +export async function withCopilotOtelContext( + scope: CopilotOtelScope, + fn: (otelContext: Context) => Promise, +): Promise { + const parentContext = context.active(); + const span = getTracer().startSpan( + "gen_ai.agent.execute", + { + attributes: { + "gen_ai.agent.name": "mothership", + "gen_ai.agent.id": + scope.transport === "stream" + ? "mothership-stream" + : "mothership-headless", + "gen_ai.operation.name": + scope.transport === "stream" ? "chat" : "invoke_agent", + "request.id": scope.requestId, + "sim.request_id": scope.requestId, + "copilot.route": scope.route ?? "", + "copilot.transport": scope.transport, + ...(scope.chatId ? { "chat.id": scope.chatId } : {}), + ...(scope.workflowId ? { "workflow.id": scope.workflowId } : {}), + ...(scope.executionId + ? { "workflow.execution_id": scope.executionId } + : {}), + ...(scope.runId ? { "run.id": scope.runId } : {}), + ...(scope.streamId ? { "stream.id": scope.streamId } : {}), + }, + }, + parentContext, + ); + const carrierSpan = isValidSpanContext(span.spanContext()) + ? 
span + : trace.wrapSpanContext(createFallbackSpanContext()); + const otelContext = trace.setSpan(parentContext, carrierSpan); + let sawError = false; + + try { + return await context.with(otelContext, () => fn(otelContext)); + } catch (error) { + sawError = true; + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? error : new Error(String(error)), + ); + throw error; + } finally { + if (!sawError) { + span.setStatus({ code: SpanStatusCode.OK }); + } + span.end(); + } +} diff --git a/apps/sim/lib/copilot/request/session/contract.test.ts b/apps/sim/lib/copilot/request/session/contract.test.ts index e9ac58707c6..37724d874e4 100644 --- a/apps/sim/lib/copilot/request/session/contract.test.ts +++ b/apps/sim/lib/copilot/request/session/contract.test.ts @@ -2,207 +2,215 @@ * @vitest-environment node */ -import { describe, expect, it } from 'vitest' +import { describe, expect, it } from "vitest"; import { isContractStreamEventEnvelope, isSyntheticFilePreviewEventEnvelope, parsePersistedStreamEventEnvelope, parsePersistedStreamEventEnvelopeJson, -} from './contract' +} from "./contract"; const BASE_ENVELOPE = { v: 1 as const, seq: 1, - ts: '2026-04-11T00:00:00.000Z', + ts: "2026-04-11T00:00:00.000Z", stream: { - streamId: 'stream-1', - cursor: '1', + streamId: "stream-1", + cursor: "1", }, trace: { - requestId: 'req-1', + requestId: "req-1", }, -} +}; -describe('stream session contract parser', () => { - it('accepts contract text events', () => { +describe("stream session contract parser", () => { + it("accepts contract text events", () => { const event = { ...BASE_ENVELOPE, - type: 'text' as const, + trace: { + ...BASE_ENVELOPE.trace, + goTraceId: "go-trace-1", + }, + type: "text" as const, payload: { - channel: 'assistant' as const, - text: 'hello', + channel: "assistant" as const, + text: "hello", }, - } + }; - 
expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(isContractStreamEventEnvelope(event)).toBe(true); - const parsed = parsePersistedStreamEventEnvelope(event) + const parsed = parsePersistedStreamEventEnvelope(event); expect(parsed).toEqual({ ok: true, event, - }) - }) + }); + }); - it('accepts contract session chat events', () => { + it("accepts contract session chat events", () => { const event = { ...BASE_ENVELOPE, - type: 'session' as const, - payload: { kind: 'chat' as const, chatId: 'chat-1' }, - } + type: "session" as const, + payload: { kind: "chat" as const, chatId: "chat-1" }, + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract complete events', () => { + it("accepts contract complete events", () => { const event = { ...BASE_ENVELOPE, - type: 'complete' as const, - payload: { status: 'complete' as const }, - } + type: "complete" as const, + payload: { status: "complete" as const }, + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract error events', () => { + it("accepts contract error events", () => { const event = { ...BASE_ENVELOPE, - type: 'error' as const, - payload: { message: 'something went wrong' }, - } + type: "error" as const, + payload: { message: "something went wrong" }, + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract tool call 
events', () => { + it("accepts contract tool call events", () => { const event = { ...BASE_ENVELOPE, - type: 'tool' as const, + type: "tool" as const, payload: { - toolCallId: 'tc-1', - toolName: 'read', - phase: 'call' as const, - executor: 'sim' as const, - mode: 'sync' as const, + toolCallId: "tc-1", + toolName: "read", + phase: "call" as const, + executor: "sim" as const, + mode: "sync" as const, }, - } + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract span events', () => { + it("accepts contract span events", () => { const event = { ...BASE_ENVELOPE, - type: 'span' as const, - payload: { kind: 'subagent' as const, event: 'start' as const, agent: 'file' }, - } + type: "span" as const, + payload: { + kind: "subagent" as const, + event: "start" as const, + agent: "file", + }, + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract resource events', () => { + it("accepts contract resource events", () => { const event = { ...BASE_ENVELOPE, - type: 'resource' as const, + type: "resource" as const, payload: { - op: 'upsert' as const, - resource: { id: 'r-1', type: 'file', title: 'test.md' }, + op: "upsert" as const, + resource: { id: "r-1", type: "file", title: "test.md" }, }, - } + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts contract run events', () => { + it("accepts contract 
run events", () => { const event = { ...BASE_ENVELOPE, - type: 'run' as const, - payload: { kind: 'compaction_start' as const }, - } + type: "run" as const, + payload: { kind: "compaction_start" as const }, + }; - expect(isContractStreamEventEnvelope(event)).toBe(true) - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) - }) + expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); + }); - it('accepts synthetic file preview events', () => { + it("accepts synthetic file preview events", () => { const event = { ...BASE_ENVELOPE, - type: 'tool' as const, + type: "tool" as const, payload: { - toolCallId: 'preview-1', - toolName: 'workspace_file' as const, - previewPhase: 'file_preview_content' as const, - content: 'draft body', - contentMode: 'snapshot' as const, + toolCallId: "preview-1", + toolName: "workspace_file" as const, + previewPhase: "file_preview_content" as const, + content: "draft body", + contentMode: "snapshot" as const, previewVersion: 2, - fileName: 'draft.md', + fileName: "draft.md", }, - } + }; - expect(isSyntheticFilePreviewEventEnvelope(event)).toBe(true) + expect(isSyntheticFilePreviewEventEnvelope(event)).toBe(true); - const parsed = parsePersistedStreamEventEnvelope(event) + const parsed = parsePersistedStreamEventEnvelope(event); expect(parsed).toEqual({ ok: true, event, - }) - }) + }); + }); - it('rejects invalid tool events with structured validation errors', () => { + it("rejects invalid tool events with structured validation errors", () => { const parsed = parsePersistedStreamEventEnvelope({ ...BASE_ENVELOPE, - type: 'tool', + type: "tool", payload: { - toolCallId: 'tool-1', - toolName: 'read', + toolCallId: "tool-1", + toolName: "read", }, - }) + }); - expect(parsed.ok).toBe(false) + expect(parsed.ok).toBe(false); if (parsed.ok) { - throw new Error('expected invalid result') + throw new Error("expected invalid result"); } - 
expect(parsed.reason).toBe('invalid_stream_event') - }) + expect(parsed.reason).toBe("invalid_stream_event"); + }); - it('rejects unknown event types', () => { + it("rejects unknown event types", () => { const parsed = parsePersistedStreamEventEnvelope({ ...BASE_ENVELOPE, - type: 'unknown_type', + type: "unknown_type", payload: {}, - }) + }); - expect(parsed.ok).toBe(false) + expect(parsed.ok).toBe(false); if (parsed.ok) { - throw new Error('expected invalid result') + throw new Error("expected invalid result"); } - expect(parsed.reason).toBe('invalid_stream_event') - expect(parsed.errors).toContain('unknown type="unknown_type"') - }) + expect(parsed.reason).toBe("invalid_stream_event"); + expect(parsed.errors).toContain('unknown type="unknown_type"'); + }); - it('rejects non-object values', () => { - const parsed = parsePersistedStreamEventEnvelope('not an object') + it("rejects non-object values", () => { + const parsed = parsePersistedStreamEventEnvelope("not an object"); - expect(parsed.ok).toBe(false) + expect(parsed.ok).toBe(false); if (parsed.ok) { - throw new Error('expected invalid result') + throw new Error("expected invalid result"); } - expect(parsed.reason).toBe('invalid_stream_event') - expect(parsed.errors).toContain('value is not an object') - }) + expect(parsed.reason).toBe("invalid_stream_event"); + expect(parsed.errors).toContain("value is not an object"); + }); - it('reports invalid JSON separately from schema failures', () => { - const parsed = parsePersistedStreamEventEnvelopeJson('{') + it("reports invalid JSON separately from schema failures", () => { + const parsed = parsePersistedStreamEventEnvelopeJson("{"); - expect(parsed.ok).toBe(false) + expect(parsed.ok).toBe(false); if (parsed.ok) { - throw new Error('expected invalid json result') + throw new Error("expected invalid json result"); } - expect(parsed.reason).toBe('invalid_json') - }) -}) + expect(parsed.reason).toBe("invalid_json"); + }); +}); diff --git 
a/apps/sim/lib/copilot/request/session/contract.ts b/apps/sim/lib/copilot/request/session/contract.ts index ff45dbd9151..556e07e9261 100644 --- a/apps/sim/lib/copilot/request/session/contract.ts +++ b/apps/sim/lib/copilot/request/session/contract.ts @@ -3,7 +3,7 @@ import type { MothershipStreamV1StreamRef, MothershipStreamV1StreamScope, MothershipStreamV1Trace, -} from '@/lib/copilot/generated/mothership-stream-v1' +} from "@/lib/copilot/generated/mothership-stream-v1"; import { MothershipStreamV1EventType, MothershipStreamV1ResourceOp, @@ -12,78 +12,79 @@ import { MothershipStreamV1SpanPayloadKind, MothershipStreamV1TextChannel, MothershipStreamV1ToolPhase, -} from '@/lib/copilot/generated/mothership-stream-v1' -import type { FilePreviewTargetKind } from './file-preview-session-contract' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import type { FilePreviewTargetKind } from "./file-preview-session-contract"; -type JsonRecord = Record +type JsonRecord = Record; const FILE_PREVIEW_PHASE = { - start: 'file_preview_start', - target: 'file_preview_target', - editMeta: 'file_preview_edit_meta', - content: 'file_preview_content', - complete: 'file_preview_complete', -} as const + start: "file_preview_start", + target: "file_preview_target", + editMeta: "file_preview_edit_meta", + content: "file_preview_content", + complete: "file_preview_complete", +} as const; type EnvelopeToStreamEvent = T extends { - type: infer TType - payload: infer TPayload - scope?: infer TScope + type: infer TType; + payload: infer TPayload; + scope?: infer TScope; } ? 
{ type: TType; payload: TPayload; scope?: Exclude } - : never + : never; -export type SyntheticFilePreviewPhase = (typeof FILE_PREVIEW_PHASE)[keyof typeof FILE_PREVIEW_PHASE] +export type SyntheticFilePreviewPhase = + (typeof FILE_PREVIEW_PHASE)[keyof typeof FILE_PREVIEW_PHASE]; export interface SyntheticFilePreviewTarget { - kind: FilePreviewTargetKind - fileId?: string - fileName?: string + kind: FilePreviewTargetKind; + fileId?: string; + fileName?: string; } export interface SyntheticFilePreviewStartPayload { - previewPhase: typeof FILE_PREVIEW_PHASE.start - toolCallId: string - toolName: 'workspace_file' + previewPhase: typeof FILE_PREVIEW_PHASE.start; + toolCallId: string; + toolName: "workspace_file"; } export interface SyntheticFilePreviewTargetPayload { - operation?: string - previewPhase: typeof FILE_PREVIEW_PHASE.target - target: SyntheticFilePreviewTarget - title?: string - toolCallId: string - toolName: 'workspace_file' + operation?: string; + previewPhase: typeof FILE_PREVIEW_PHASE.target; + target: SyntheticFilePreviewTarget; + title?: string; + toolCallId: string; + toolName: "workspace_file"; } export interface SyntheticFilePreviewEditMetaPayload { - edit: JsonRecord - previewPhase: typeof FILE_PREVIEW_PHASE.editMeta - toolCallId: string - toolName: 'workspace_file' + edit: JsonRecord; + previewPhase: typeof FILE_PREVIEW_PHASE.editMeta; + toolCallId: string; + toolName: "workspace_file"; } export interface SyntheticFilePreviewContentPayload { - content: string - contentMode: 'delta' | 'snapshot' - edit?: JsonRecord - fileId?: string - fileName: string - operation?: string - previewPhase: typeof FILE_PREVIEW_PHASE.content - previewVersion: number - targetKind?: string - toolCallId: string - toolName: 'workspace_file' + content: string; + contentMode: "delta" | "snapshot"; + edit?: JsonRecord; + fileId?: string; + fileName: string; + operation?: string; + previewPhase: typeof FILE_PREVIEW_PHASE.content; + previewVersion: number; + targetKind?: 
string; + toolCallId: string; + toolName: "workspace_file"; } export interface SyntheticFilePreviewCompletePayload { - fileId?: string - output?: unknown - previewPhase: typeof FILE_PREVIEW_PHASE.complete - previewVersion?: number - toolCallId: string - toolName: 'workspace_file' + fileId?: string; + output?: unknown; + previewPhase: typeof FILE_PREVIEW_PHASE.complete; + previewVersion?: number; + toolCallId: string; + toolName: "workspace_file"; } export type SyntheticFilePreviewPayload = @@ -91,96 +92,105 @@ export type SyntheticFilePreviewPayload = | SyntheticFilePreviewTargetPayload | SyntheticFilePreviewEditMetaPayload | SyntheticFilePreviewContentPayload - | SyntheticFilePreviewCompletePayload + | SyntheticFilePreviewCompletePayload; export interface SyntheticFilePreviewEventEnvelope { - payload: SyntheticFilePreviewPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: SyntheticFilePreviewPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export type PersistedStreamEventEnvelope = | MothershipStreamV1EventEnvelope - | SyntheticFilePreviewEventEnvelope - -export type ContractStreamEvent = EnvelopeToStreamEvent -export type SyntheticStreamEvent = EnvelopeToStreamEvent -export type SessionStreamEvent = ContractStreamEvent | SyntheticStreamEvent -export type StreamEvent = SessionStreamEvent + | SyntheticFilePreviewEventEnvelope; + +export type ContractStreamEvent = + EnvelopeToStreamEvent; +export type SyntheticStreamEvent = + EnvelopeToStreamEvent; +export type SessionStreamEvent = ContractStreamEvent | SyntheticStreamEvent; +export type StreamEvent = SessionStreamEvent; export type ToolCallStreamEvent = Extract< ContractStreamEvent, - { type: 'tool'; payload: { phase: 'call' } } -> + { type: "tool"; payload: { 
phase: "call" } } +>; export type ToolArgsDeltaStreamEvent = Extract< ContractStreamEvent, - { type: 'tool'; payload: { phase: 'args_delta' } } -> + { type: "tool"; payload: { phase: "args_delta" } } +>; export type ToolResultStreamEvent = Extract< ContractStreamEvent, - { type: 'tool'; payload: { phase: 'result' } } -> + { type: "tool"; payload: { phase: "result" } } +>; export type SubagentSpanStreamEvent = Extract< ContractStreamEvent, - { type: 'span'; payload: { kind: 'subagent' } } -> + { type: "span"; payload: { kind: "subagent" } } +>; export interface ParseStreamEventEnvelopeSuccess { - ok: true - event: PersistedStreamEventEnvelope + ok: true; + event: PersistedStreamEventEnvelope; } export interface ParseStreamEventEnvelopeFailure { - errors?: string[] - message: string - ok: false - reason: 'invalid_json' | 'invalid_stream_event' + errors?: string[]; + message: string; + ok: false; + reason: "invalid_json" | "invalid_stream_event"; } export type ParseStreamEventEnvelopeResult = | ParseStreamEventEnvelopeSuccess - | ParseStreamEventEnvelopeFailure + | ParseStreamEventEnvelopeFailure; // --------------------------------------------------------------------------- // Structural helpers (CSP-safe – no codegen / eval / new Function) // --------------------------------------------------------------------------- function isRecord(value: unknown): value is JsonRecord { - return Boolean(value) && typeof value === 'object' && !Array.isArray(value) + return Boolean(value) && typeof value === "object" && !Array.isArray(value); } function isOptionalString(value: unknown): value is string | undefined { - return value === undefined || typeof value === 'string' + return value === undefined || typeof value === "string"; } function isOptionalFiniteNumber(value: unknown): value is number | undefined { - return value === undefined || (typeof value === 'number' && Number.isFinite(value)) + return ( + value === undefined || (typeof value === "number" && 
Number.isFinite(value)) + ); } function isStreamRef(value: unknown): value is MothershipStreamV1StreamRef { return ( isRecord(value) && - typeof value.streamId === 'string' && + typeof value.streamId === "string" && isOptionalString(value.chatId) && isOptionalString(value.cursor) - ) + ); } function isTrace(value: unknown): value is MothershipStreamV1Trace { - return isRecord(value) && typeof value.requestId === 'string' && isOptionalString(value.spanId) + return ( + isRecord(value) && + typeof value.requestId === "string" && + isOptionalString(value.goTraceId) && + isOptionalString(value.spanId) + ); } function isStreamScope(value: unknown): value is MothershipStreamV1StreamScope { return ( isRecord(value) && - value.lane === 'subagent' && + value.lane === "subagent" && isOptionalString(value.agentId) && isOptionalString(value.parentToolCallId) - ) + ); } // --------------------------------------------------------------------------- @@ -193,42 +203,46 @@ function isStreamScope(value: unknown): value is MothershipStreamV1StreamScope { // structural checking to safely dispatch inside the switch statement. 
// --------------------------------------------------------------------------- -const KNOWN_EVENT_TYPES: ReadonlySet = new Set(Object.values(MothershipStreamV1EventType)) +const KNOWN_EVENT_TYPES: ReadonlySet = new Set( + Object.values(MothershipStreamV1EventType), +); function isValidEnvelopeShell(value: unknown): value is JsonRecord & { - v: 1 - seq: number - ts: string - stream: MothershipStreamV1StreamRef - type: string - payload: JsonRecord + v: 1; + seq: number; + ts: string; + stream: MothershipStreamV1StreamRef; + type: string; + payload: JsonRecord; } { - if (!isRecord(value)) return false - if (value.v !== 1) return false - if (typeof value.seq !== 'number' || !Number.isFinite(value.seq)) return false - if (typeof value.ts !== 'string') return false - if (!isStreamRef(value.stream)) return false - if (value.trace !== undefined && !isTrace(value.trace)) return false - if (value.scope !== undefined && !isStreamScope(value.scope)) return false - if (typeof value.type !== 'string' || !KNOWN_EVENT_TYPES.has(value.type)) return false - if (!isRecord(value.payload)) return false - return true + if (!isRecord(value)) return false; + if (value.v !== 1) return false; + if (typeof value.seq !== "number" || !Number.isFinite(value.seq)) + return false; + if (typeof value.ts !== "string") return false; + if (!isStreamRef(value.stream)) return false; + if (value.trace !== undefined && !isTrace(value.trace)) return false; + if (value.scope !== undefined && !isStreamScope(value.scope)) return false; + if (typeof value.type !== "string" || !KNOWN_EVENT_TYPES.has(value.type)) + return false; + if (!isRecord(value.payload)) return false; + return true; } function isValidSessionPayload(payload: JsonRecord): boolean { - const kind = payload.kind - if (typeof kind !== 'string') return false + const kind = payload.kind; + if (typeof kind !== "string") return false; switch (kind) { case MothershipStreamV1SessionKind.start: - return true + return true; case 
MothershipStreamV1SessionKind.chat: - return typeof payload.chatId === 'string' + return typeof payload.chatId === "string"; case MothershipStreamV1SessionKind.title: - return typeof payload.title === 'string' + return typeof payload.title === "string"; case MothershipStreamV1SessionKind.trace: - return typeof payload.requestId === 'string' + return typeof payload.requestId === "string"; default: - return false + return false; } } @@ -236,28 +250,28 @@ function isValidTextPayload(payload: JsonRecord): boolean { return ( (payload.channel === MothershipStreamV1TextChannel.assistant || payload.channel === MothershipStreamV1TextChannel.thinking) && - typeof payload.text === 'string' - ) + typeof payload.text === "string" + ); } function isValidToolPayload(payload: JsonRecord): boolean { - if (typeof payload.toolCallId !== 'string') return false - if (typeof payload.toolName !== 'string') return false - const phase = payload.phase + if (typeof payload.toolCallId !== "string") return false; + if (typeof payload.toolName !== "string") return false; + const phase = payload.phase; return ( phase === MothershipStreamV1ToolPhase.call || phase === MothershipStreamV1ToolPhase.args_delta || phase === MothershipStreamV1ToolPhase.result - ) + ); } function isValidSpanPayload(payload: JsonRecord): boolean { - const kind = payload.kind + const kind = payload.kind; return ( kind === MothershipStreamV1SpanPayloadKind.subagent || kind === MothershipStreamV1SpanPayloadKind.structured_result || kind === MothershipStreamV1SpanPayloadKind.subagent_result - ) + ); } function isValidResourcePayload(payload: JsonRecord): boolean { @@ -265,51 +279,55 @@ function isValidResourcePayload(payload: JsonRecord): boolean { (payload.op === MothershipStreamV1ResourceOp.upsert || payload.op === MothershipStreamV1ResourceOp.remove) && isRecord(payload.resource) && - typeof (payload.resource as JsonRecord).id === 'string' && - typeof (payload.resource as JsonRecord).type === 'string' - ) + typeof 
(payload.resource as JsonRecord).id === "string" && + typeof (payload.resource as JsonRecord).type === "string" + ); } function isValidRunPayload(payload: JsonRecord): boolean { - const kind = payload.kind + const kind = payload.kind; return ( kind === MothershipStreamV1RunKind.checkpoint_pause || kind === MothershipStreamV1RunKind.resumed || kind === MothershipStreamV1RunKind.compaction_start || kind === MothershipStreamV1RunKind.compaction_done - ) + ); } function isValidErrorPayload(payload: JsonRecord): boolean { - return typeof payload.message === 'string' || typeof payload.error === 'string' + return ( + typeof payload.message === "string" || typeof payload.error === "string" + ); } function isValidCompletePayload(payload: JsonRecord): boolean { - return typeof payload.status === 'string' + return typeof payload.status === "string"; } -function isContractEnvelope(value: unknown): value is MothershipStreamV1EventEnvelope { - if (!isValidEnvelopeShell(value)) return false - const payload = value.payload as JsonRecord +function isContractEnvelope( + value: unknown, +): value is MothershipStreamV1EventEnvelope { + if (!isValidEnvelopeShell(value)) return false; + const payload = value.payload as JsonRecord; switch (value.type) { case MothershipStreamV1EventType.session: - return isValidSessionPayload(payload) + return isValidSessionPayload(payload); case MothershipStreamV1EventType.text: - return isValidTextPayload(payload) + return isValidTextPayload(payload); case MothershipStreamV1EventType.tool: - return isValidToolPayload(payload) + return isValidToolPayload(payload); case MothershipStreamV1EventType.span: - return isValidSpanPayload(payload) + return isValidSpanPayload(payload); case MothershipStreamV1EventType.resource: - return isValidResourcePayload(payload) + return isValidResourcePayload(payload); case MothershipStreamV1EventType.run: - return isValidRunPayload(payload) + return isValidRunPayload(payload); case MothershipStreamV1EventType.error: - 
return isValidErrorPayload(payload) + return isValidErrorPayload(payload); case MothershipStreamV1EventType.complete: - return isValidCompletePayload(payload) + return isValidCompletePayload(payload); default: - return false + return false; } } @@ -318,97 +336,132 @@ function isContractEnvelope(value: unknown): value is MothershipStreamV1EventEnv // --------------------------------------------------------------------------- function isSyntheticEnvelopeBase( - value: unknown -): value is Omit & { payload?: unknown } { + value: unknown, +): value is Omit & { + payload?: unknown; +} { return ( isRecord(value) && value.v === 1 && - value.type === 'tool' && - typeof value.seq === 'number' && + value.type === "tool" && + typeof value.seq === "number" && Number.isFinite(value.seq) && - typeof value.ts === 'string' && + typeof value.ts === "string" && isStreamRef(value.stream) && (value.trace === undefined || isTrace(value.trace)) && (value.scope === undefined || isStreamScope(value.scope)) - ) + ); } -function isSyntheticFilePreviewTarget(value: unknown): value is SyntheticFilePreviewTarget { +function isSyntheticFilePreviewTarget( + value: unknown, +): value is SyntheticFilePreviewTarget { return ( isRecord(value) && - (value.kind === 'new_file' || value.kind === 'file_id') && + (value.kind === "new_file" || value.kind === "file_id") && isOptionalString(value.fileId) && isOptionalString(value.fileName) - ) + ); } -function isSyntheticFilePreviewPayload(value: unknown): value is SyntheticFilePreviewPayload { +function isSyntheticFilePreviewPayload( + value: unknown, +): value is SyntheticFilePreviewPayload { if (!isRecord(value)) { - return false + return false; } - if (typeof value.toolCallId !== 'string' || value.toolName !== 'workspace_file') { - return false + if ( + typeof value.toolCallId !== "string" || + value.toolName !== "workspace_file" + ) { + return false; } switch (value.previewPhase) { case FILE_PREVIEW_PHASE.start: - return true + return true; case 
FILE_PREVIEW_PHASE.target: return ( isSyntheticFilePreviewTarget(value.target) && isOptionalString(value.operation) && isOptionalString(value.title) - ) + ); case FILE_PREVIEW_PHASE.editMeta: - return isRecord(value.edit) + return isRecord(value.edit); case FILE_PREVIEW_PHASE.content: return ( - typeof value.content === 'string' && - (value.contentMode === 'delta' || value.contentMode === 'snapshot') && - typeof value.previewVersion === 'number' && + typeof value.content === "string" && + (value.contentMode === "delta" || value.contentMode === "snapshot") && + typeof value.previewVersion === "number" && Number.isFinite(value.previewVersion) && - typeof value.fileName === 'string' && + typeof value.fileName === "string" && isOptionalString(value.fileId) && isOptionalString(value.targetKind) && isOptionalString(value.operation) && (value.edit === undefined || isRecord(value.edit)) - ) + ); case FILE_PREVIEW_PHASE.complete: - return isOptionalString(value.fileId) && isOptionalFiniteNumber(value.previewVersion) + return ( + isOptionalString(value.fileId) && + isOptionalFiniteNumber(value.previewVersion) + ); default: - return false + return false; } } export function isSyntheticFilePreviewEventEnvelope( - value: unknown + value: unknown, ): value is SyntheticFilePreviewEventEnvelope { - return isSyntheticEnvelopeBase(value) && isSyntheticFilePreviewPayload(value.payload) + return ( + isSyntheticEnvelopeBase(value) && + isSyntheticFilePreviewPayload(value.payload) + ); } // --------------------------------------------------------------------------- // Stream event type guards // --------------------------------------------------------------------------- -export function isToolCallStreamEvent(event: SessionStreamEvent): event is ToolCallStreamEvent { - return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'call' +export function isToolCallStreamEvent( + event: SessionStreamEvent, +): event is ToolCallStreamEvent { + return ( + event.type === 
"tool" && + isRecord(event.payload) && + event.payload.phase === "call" + ); } export function isToolArgsDeltaStreamEvent( - event: SessionStreamEvent + event: SessionStreamEvent, ): event is ToolArgsDeltaStreamEvent { - return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'args_delta' + return ( + event.type === "tool" && + isRecord(event.payload) && + event.payload.phase === "args_delta" + ); } -export function isToolResultStreamEvent(event: SessionStreamEvent): event is ToolResultStreamEvent { - return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'result' +export function isToolResultStreamEvent( + event: SessionStreamEvent, +): event is ToolResultStreamEvent { + return ( + event.type === "tool" && + isRecord(event.payload) && + event.payload.phase === "result" + ); } export function isSubagentSpanStreamEvent( - event: SessionStreamEvent + event: SessionStreamEvent, ): event is SubagentSpanStreamEvent { - return event.type === 'span' && isRecord(event.payload) && event.payload.kind === 'subagent' + return ( + event.type === "span" && + isRecord(event.payload) && + event.payload.kind === "subagent" + ); } // --------------------------------------------------------------------------- @@ -416,51 +469,56 @@ export function isSubagentSpanStreamEvent( // --------------------------------------------------------------------------- export function isContractStreamEventEnvelope( - value: unknown + value: unknown, ): value is MothershipStreamV1EventEnvelope { - return isContractEnvelope(value) + return isContractEnvelope(value); } -export function parsePersistedStreamEventEnvelope(value: unknown): ParseStreamEventEnvelopeResult { +export function parsePersistedStreamEventEnvelope( + value: unknown, +): ParseStreamEventEnvelopeResult { if (isContractEnvelope(value)) { - return { ok: true, event: value } + return { ok: true, event: value }; } if (isSyntheticFilePreviewEventEnvelope(value)) { - return { ok: true, 
event: value } + return { ok: true, event: value }; } - const hints: string[] = [] + const hints: string[] = []; if (!isRecord(value)) { - hints.push('value is not an object') + hints.push("value is not an object"); } else { - if (value.v !== 1) hints.push(`unexpected v=${JSON.stringify(value.v)}`) - if (typeof value.type !== 'string') hints.push('missing type') - else if (!KNOWN_EVENT_TYPES.has(value.type)) hints.push(`unknown type="${value.type}"`) - if (!isRecord(value.payload)) hints.push('missing or invalid payload') + if (value.v !== 1) hints.push(`unexpected v=${JSON.stringify(value.v)}`); + if (typeof value.type !== "string") hints.push("missing type"); + else if (!KNOWN_EVENT_TYPES.has(value.type)) + hints.push(`unknown type="${value.type}"`); + if (!isRecord(value.payload)) hints.push("missing or invalid payload"); } return { ok: false, - reason: 'invalid_stream_event', - message: 'A stream event failed validation.', + reason: "invalid_stream_event", + message: "A stream event failed validation.", ...(hints.length > 0 ? { errors: hints } : {}), - } + }; } -export function parsePersistedStreamEventEnvelopeJson(raw: string): ParseStreamEventEnvelopeResult { - let parsed: unknown +export function parsePersistedStreamEventEnvelopeJson( + raw: string, +): ParseStreamEventEnvelopeResult { + let parsed: unknown; try { - parsed = JSON.parse(raw) + parsed = JSON.parse(raw); } catch (error) { - const rawMessage = error instanceof Error ? error.message : 'Invalid JSON' + const rawMessage = error instanceof Error ? error.message : "Invalid JSON"; return { ok: false, - reason: 'invalid_json', - message: 'Received invalid JSON while parsing a stream event.', + reason: "invalid_json", + message: "Received invalid JSON while parsing a stream event.", ...(rawMessage ? 
{ errors: [rawMessage] } : {}), - } + }; } - return parsePersistedStreamEventEnvelope(parsed) + return parsePersistedStreamEventEnvelope(parsed); } diff --git a/apps/sim/lib/copilot/request/session/explicit-abort.ts b/apps/sim/lib/copilot/request/session/explicit-abort.ts index 7aad5465544..6b678647215 100644 --- a/apps/sim/lib/copilot/request/session/explicit-abort.ts +++ b/apps/sim/lib/copilot/request/session/explicit-abort.ts @@ -1,40 +1,66 @@ -import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' -import { env } from '@/lib/core/config/env' +import type { Context } from "@opentelemetry/api"; +import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; +import { fetchGo } from "@/lib/copilot/request/go/fetch"; +import { env } from "@/lib/core/config/env"; -export const DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS = 3000 +export const DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS = 3000; export async function requestExplicitStreamAbort(params: { - streamId: string - userId: string - chatId?: string - timeoutMs?: number + streamId: string; + userId: string; + chatId?: string; + timeoutMs?: number; + otelContext?: Context; }): Promise { - const { streamId, userId, chatId, timeoutMs = DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS } = params + const { + streamId, + userId, + chatId, + timeoutMs = DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS, + otelContext, + } = params; - const headers: Record = { 'Content-Type': 'application/json' } + const headers: Record = { + "Content-Type": "application/json", + }; if (env.COPILOT_API_KEY) { - headers['x-api-key'] = env.COPILOT_API_KEY + headers["x-api-key"] = env.COPILOT_API_KEY; } - const controller = new AbortController() - const timeout = setTimeout(() => controller.abort('timeout:go_explicit_abort_fetch'), timeoutMs) + const controller = new AbortController(); + const timeout = setTimeout( + () => controller.abort("timeout:go_explicit_abort_fetch"), + timeoutMs, + ); try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { - 
method: 'POST', - headers, - signal: controller.signal, - body: JSON.stringify({ - messageId: streamId, - userId, - ...(chatId ? { chatId } : {}), - }), - }) + const response = await fetchGo( + `${SIM_AGENT_API_URL}/api/streams/explicit-abort`, + { + method: "POST", + headers, + signal: controller.signal, + body: JSON.stringify({ + messageId: streamId, + userId, + ...(chatId ? { chatId } : {}), + }), + otelContext, + spanName: "sim → go /api/streams/explicit-abort", + operation: "explicit_abort", + attributes: { + "copilot.stream.id": streamId, + ...(chatId ? { "chat.id": chatId } : {}), + }, + }, + ); if (!response.ok) { - throw new Error(`Explicit abort marker request failed: ${response.status}`) + throw new Error( + `Explicit abort marker request failed: ${response.status}`, + ); } } finally { - clearTimeout(timeout) + clearTimeout(timeout); } } diff --git a/apps/sim/lib/copilot/request/session/recovery.test.ts b/apps/sim/lib/copilot/request/session/recovery.test.ts new file mode 100644 index 00000000000..24599a01ffe --- /dev/null +++ b/apps/sim/lib/copilot/request/session/recovery.test.ts @@ -0,0 +1,38 @@ +/** + * @vitest-environment node + */ + +import { describe, expect, it, vi } from "vitest"; + +const { getLatestSeq, getOldestSeq, readEvents } = vi.hoisted(() => ({ + getLatestSeq: vi.fn(), + getOldestSeq: vi.fn(), + readEvents: vi.fn(), +})); + +vi.mock("./buffer", () => ({ + getLatestSeq, + getOldestSeq, + readEvents, +})); + +import { checkForReplayGap } from "./recovery"; + +describe("checkForReplayGap", () => { + it("uses the latest buffered request id when run metadata is missing it", async () => { + getOldestSeq.mockResolvedValue(10); + getLatestSeq.mockResolvedValue(12); + readEvents.mockResolvedValue([ + { + trace: { requestId: "req-live-123" }, + }, + ]); + + const result = await checkForReplayGap("stream-1", "1"); + + expect(readEvents).toHaveBeenCalledWith("stream-1", "11"); + expect(result?.gapDetected).toBe(true); + 
expect(result?.envelopes[0].trace.requestId).toBe("req-live-123"); + expect(result?.envelopes[1].trace.requestId).toBe("req-live-123"); + }); +}); diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 74612b59211..0ae7ed563b6 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -1,29 +1,30 @@ -import { createLogger } from '@sim/logger' +import { createLogger } from "@sim/logger"; import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from '@/lib/copilot/generated/mothership-stream-v1' -import { getLatestSeq, getOldestSeq } from './buffer' -import { createEvent } from './event' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import { getLatestSeq, getOldestSeq, readEvents } from "./buffer"; +import { createEvent } from "./event"; -const logger = createLogger('SessionRecovery') +const logger = createLogger("SessionRecovery"); export interface ReplayGapResult { - gapDetected: true - envelopes: ReturnType[] + gapDetected: true; + envelopes: ReturnType[]; } export async function checkForReplayGap( streamId: string, - afterCursor: string + afterCursor: string, + requestId?: string, ): Promise { - const requestedAfterSeq = Number(afterCursor || '0') + const requestedAfterSeq = Number(afterCursor || "0"); if (requestedAfterSeq <= 0) { - return null + return null; } - const oldestSeq = await getOldestSeq(streamId) - const latestSeq = await getLatestSeq(streamId) + const oldestSeq = await getOldestSeq(streamId); + const latestSeq = await getLatestSeq(streamId); if ( latestSeq !== null && @@ -31,46 +32,81 @@ export async function checkForReplayGap( oldestSeq !== null && requestedAfterSeq < oldestSeq - 1 ) { - logger.warn('Replay gap detected: requested cursor is below oldest available event', { + const resolvedRequestId = await resolveReplayGapRequestId( streamId, - requestedAfterSeq, - oldestAvailableSeq: 
oldestSeq, latestSeq, - }) + requestId, + ); + logger.warn( + "Replay gap detected: requested cursor is below oldest available event", + { + streamId, + requestedAfterSeq, + oldestAvailableSeq: oldestSeq, + latestSeq, + }, + ); const gapEnvelope = createEvent({ streamId, cursor: String(latestSeq + 1), seq: latestSeq + 1, - requestId: '', + requestId: resolvedRequestId, type: MothershipStreamV1EventType.error, payload: { - message: 'Replay history is no longer available. Some events may have been lost.', - code: 'replay_gap', + message: + "Replay history is no longer available. Some events may have been lost.", + code: "replay_gap", data: { oldestAvailableSeq: oldestSeq, requestedAfterSeq, }, }, - }) + }); const terminalEnvelope = createEvent({ streamId, cursor: String(latestSeq + 2), seq: latestSeq + 2, - requestId: '', + requestId: resolvedRequestId, type: MothershipStreamV1EventType.complete, payload: { status: MothershipStreamV1CompletionStatus.error, - reason: 'replay_gap', + reason: "replay_gap", }, - }) + }); return { gapDetected: true, envelopes: [gapEnvelope, terminalEnvelope], - } + }; + } + + return null; +} + +async function resolveReplayGapRequestId( + streamId: string, + latestSeq: number, + requestId?: string, +): Promise { + if (typeof requestId === "string" && requestId.length > 0) { + return requestId; } - return null + try { + const latestEvents = await readEvents( + streamId, + String(Math.max(latestSeq - 1, 0)), + ); + const latestRequestId = latestEvents[0]?.trace?.requestId; + return typeof latestRequestId === "string" ? latestRequestId : ""; + } catch (error) { + logger.warn("Failed to resolve request ID for replay gap", { + streamId, + latestSeq, + error: error instanceof Error ? 
error.message : String(error), + }); + return ""; + } } diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index d9403094698..acf31d685f7 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -1,113 +1,139 @@ -import { createLogger } from '@sim/logger' -import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' -import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { createLogger } from "@sim/logger"; +import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; +import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from "@/lib/copilot/constants"; import { MothershipStreamV1EventType, MothershipStreamV1SpanPayloadKind, -} from '@/lib/copilot/generated/mothership-stream-v1' -import { createStreamingContext } from '@/lib/copilot/request/context/request-context' -import { buildToolCallSummaries } from '@/lib/copilot/request/context/result' -import { runStreamLoop } from '@/lib/copilot/request/go/stream' +} from "@/lib/copilot/generated/mothership-stream-v1"; +import { createStreamingContext } from "@/lib/copilot/request/context/request-context"; +import { buildToolCallSummaries } from "@/lib/copilot/request/context/result"; +import { runStreamLoop } from "@/lib/copilot/request/go/stream"; import type { ExecutionContext, OrchestratorOptions, StreamEvent, StreamingContext, ToolCallSummary, -} from '@/lib/copilot/request/types' -import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' -import { env } from '@/lib/core/config/env' -import { isHosted } from '@/lib/core/config/feature-flags' -import { generateId } from '@/lib/core/utils/uuid' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { getWorkflowById } from '@/lib/workflows/utils' +} from "@/lib/copilot/request/types"; +import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; +import { env } from 
"@/lib/core/config/env"; +import { isHosted } from "@/lib/core/config/feature-flags"; +import { generateId } from "@/lib/core/utils/uuid"; +import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; +import { getWorkflowById } from "@/lib/workflows/utils"; -const logger = createLogger('CopilotSubagentOrchestrator') +const logger = createLogger("CopilotSubagentOrchestrator"); -export interface SubagentOrchestratorOptions extends Omit { - userId: string - workflowId?: string - workspaceId?: string - userPermission?: string - onComplete?: (result: SubagentOrchestratorResult) => void | Promise +export interface SubagentOrchestratorOptions extends Omit< + OrchestratorOptions, + "onComplete" +> { + userId: string; + workflowId?: string; + workspaceId?: string; + userPermission?: string; + simRequestId?: string; + onComplete?: (result: SubagentOrchestratorResult) => void | Promise; } export interface SubagentOrchestratorResult { - success: boolean - content: string - toolCalls: ToolCallSummary[] + success: boolean; + content: string; + toolCalls: ToolCallSummary[]; structuredResult?: { - type?: string - summary?: string - data?: unknown - success?: boolean - } - error?: string - errors?: string[] + type?: string; + summary?: string; + data?: unknown; + success?: boolean; + }; + error?: string; + errors?: string[]; } export async function orchestrateSubagentStream( agentId: string, requestPayload: Record, - options: SubagentOrchestratorOptions + options: SubagentOrchestratorOptions, ): Promise { - const { userId, workflowId, workspaceId, userPermission } = options + const { userId, workflowId, workspaceId, userPermission } = options; const chatId = - (typeof requestPayload.chatId === 'string' && requestPayload.chatId) || generateId() - const execContext = await buildExecutionContext(userId, workflowId, workspaceId, chatId) + (typeof requestPayload.chatId === "string" && requestPayload.chatId) || + generateId(); + const execContext = await buildExecutionContext( + 
userId, + workflowId, + workspaceId, + chatId, + ); let resolvedWorkflowName = - typeof requestPayload.workflowName === 'string' ? requestPayload.workflowName : undefined + typeof requestPayload.workflowName === "string" + ? requestPayload.workflowName + : undefined; let resolvedWorkspaceId = execContext.workspaceId || - (typeof requestPayload.workspaceId === 'string' ? requestPayload.workspaceId : workspaceId) + (typeof requestPayload.workspaceId === "string" + ? requestPayload.workspaceId + : workspaceId); if (workflowId && (!resolvedWorkflowName || !resolvedWorkspaceId)) { - const workflow = await getWorkflowById(workflowId) - resolvedWorkflowName ||= workflow?.name || undefined - resolvedWorkspaceId ||= workflow?.workspaceId || undefined + const workflow = await getWorkflowById(workflowId); + resolvedWorkflowName ||= workflow?.name || undefined; + resolvedWorkspaceId ||= workflow?.workspaceId || undefined; } let resolvedWorkspaceContext = - typeof requestPayload.workspaceContext === 'string' + typeof requestPayload.workspaceContext === "string" ? requestPayload.workspaceContext - : undefined + : undefined; if (!resolvedWorkspaceContext && resolvedWorkspaceId) { try { - resolvedWorkspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) + resolvedWorkspaceContext = await generateWorkspaceContext( + resolvedWorkspaceId, + userId, + ); } catch (error) { - logger.warn('Failed to generate workspace context for subagent request', { + logger.warn("Failed to generate workspace context for subagent request", { agentId, workspaceId: resolvedWorkspaceId, error: error instanceof Error ? error.message : String(error), - }) + }); } } - const msgId = requestPayload?.messageId + const msgId = requestPayload?.messageId; const context = createStreamingContext({ chatId, - messageId: typeof msgId === 'string' ? msgId : generateId(), - }) + requestId: options.simRequestId, + messageId: typeof msgId === "string" ? 
msgId : generateId(), + }); - let structuredResult: SubagentOrchestratorResult['structuredResult'] + let structuredResult: SubagentOrchestratorResult["structuredResult"]; try { await runStreamLoop( `${SIM_AGENT_API_URL}/api/subagent/${agentId}`, { - method: 'POST', + method: "POST", headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + "Content-Type": "application/json", + ...(env.COPILOT_API_KEY ? { "x-api-key": env.COPILOT_API_KEY } : {}), + "X-Client-Version": SIM_AGENT_VERSION, + ...(options.simRequestId + ? { "X-Sim-Request-ID": options.simRequestId } + : {}), }, body: JSON.stringify({ ...requestPayload, chatId, userId, stream: true, - ...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}), + ...(resolvedWorkflowName + ? { workflowName: resolvedWorkflowName } + : {}), ...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}), - ...(resolvedWorkspaceContext ? { workspaceContext: resolvedWorkspaceContext } : {}), + ...(resolvedWorkspaceContext + ? { workspaceContext: resolvedWorkspaceContext } + : {}), isHosted, ...(userPermission ? 
{ userPermission } : {}), }), @@ -120,22 +146,27 @@ export async function orchestrateSubagentStream( onBeforeDispatch: (event: StreamEvent, ctx: StreamingContext) => { if ( event.type === MothershipStreamV1EventType.span && - (event.payload.kind === MothershipStreamV1SpanPayloadKind.structured_result || - event.payload.kind === MothershipStreamV1SpanPayloadKind.subagent_result) + (event.payload.kind === + MothershipStreamV1SpanPayloadKind.structured_result || + event.payload.kind === + MothershipStreamV1SpanPayloadKind.subagent_result) ) { - structuredResult = normalizeStructuredResult(event.payload.data) - ctx.streamComplete = true - return true + structuredResult = normalizeStructuredResult(event.payload.data); + ctx.streamComplete = true; + return true; } - if (event.scope?.agentId === agentId && !ctx.subAgentParentToolCallId) { - return false + if ( + event.scope?.agentId === agentId && + !ctx.subAgentParentToolCallId + ) { + return false; } - return false + return false; }, - } - ) + }, + ); const result: SubagentOrchestratorResult = { success: context.errors.length === 0 && !context.wasAborted, @@ -143,48 +174,56 @@ export async function orchestrateSubagentStream( toolCalls: buildToolCallSummaries(context), structuredResult, errors: context.errors.length ? context.errors : undefined, - } - await options.onComplete?.(result) - return result + }; + await options.onComplete?.(result); + return result; } catch (error) { - const err = error instanceof Error ? error : new Error('Subagent orchestration failed') - logger.error('Subagent orchestration failed', { error: err.message, agentId }) - await options.onError?.(err) + const err = + error instanceof Error + ? 
error + : new Error("Subagent orchestration failed"); + logger.error("Subagent orchestration failed", { + error: err.message, + agentId, + }); + await options.onError?.(err); return { success: false, content: context.accumulatedContent, toolCalls: [], error: err.message, - } + }; } } -function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] { - if (!data || typeof data !== 'object') return undefined - const d = data as Record +function normalizeStructuredResult( + data: unknown, +): SubagentOrchestratorResult["structuredResult"] { + if (!data || typeof data !== "object") return undefined; + const d = data as Record; return { type: (d.result_type || d.type) as string | undefined, summary: d.summary as string | undefined, data: d.data ?? d, success: d.success as boolean | undefined, - } + }; } async function buildExecutionContext( userId: string, workflowId?: string, workspaceId?: string, - chatId?: string + chatId?: string, ): Promise { if (workflowId) { - return prepareExecutionContext(userId, workflowId, chatId, { workspaceId }) + return prepareExecutionContext(userId, workflowId, chatId, { workspaceId }); } - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId); return { userId, - workflowId: workflowId || '', + workflowId: workflowId || "", workspaceId, chatId, decryptedEnvVars, - } + }; } diff --git a/apps/sim/lib/copilot/request/tools/executor.ts b/apps/sim/lib/copilot/request/tools/executor.ts index bc1cb26cde9..2016d02fa9d 100644 --- a/apps/sim/lib/copilot/request/tools/executor.ts +++ b/apps/sim/lib/copilot/request/tools/executor.ts @@ -39,6 +39,7 @@ import { type StreamingContext, type ToolCallState, } from '@/lib/copilot/request/types' +import { withCopilotToolSpan } from '@/lib/copilot/request/otel' import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor' export { waitForToolCompletion 
} from '@/lib/copilot/request/tools/client' @@ -53,6 +54,79 @@ function hasOutputValue(result: { output?: unknown } | undefined): result is { o return result !== undefined && Object.hasOwn(result, 'output') } +interface ToolResultSpanSummary { + resultSuccess: boolean + outputBytes: number + outputKind: string + errorMessage?: string + imageCount?: number + imageBytes?: number + attachmentMediaType?: string +} + +function summarizeToolResultForSpan(result: { + success: boolean + output?: unknown + error?: string +}): ToolResultSpanSummary { + const summary: ToolResultSpanSummary = { + resultSuccess: Boolean(result.success), + outputBytes: 0, + outputKind: 'none', + } + if (!result.success && result.error) { + summary.errorMessage = String(result.error).slice(0, 500) + } + if (!hasOutputValue(result)) { + return summary + } + const output = (result as { output: unknown }).output + if (typeof output === 'string') { + summary.outputKind = 'string' + summary.outputBytes = output.length + } else if (output && typeof output === 'object') { + summary.outputKind = Array.isArray(output) ? 
'array' : 'object' + try { + summary.outputBytes = JSON.stringify(output).length + } catch { + summary.outputBytes = 0 + } + const attachment = extractAttachmentShape(output) + if (attachment) { + summary.imageCount = attachment.imageCount + summary.imageBytes = attachment.imageBytes + if (attachment.mediaType) { + summary.attachmentMediaType = attachment.mediaType + } + } + } else if (output !== undefined && output !== null) { + summary.outputKind = typeof output + summary.outputBytes = String(output).length + } + return summary +} + +function extractAttachmentShape( + output: unknown, +): { imageCount: number; imageBytes: number; mediaType?: string } | null { + if (!isRecord(output)) return null + const candidate = (output as Record).attachment + if (!isRecord(candidate)) return null + const source = (candidate as Record).source + if (!isRecord(source)) return null + const type = typeof (candidate as Record).type === 'string' + ? ((candidate as Record).type as string) + : '' + if (type !== 'image') return null + const mediaType = typeof source.media_type === 'string' ? (source.media_type as string) : undefined + const data = typeof source.data === 'string' ? (source.data as string) : '' + return { + imageCount: 1, + imageBytes: data.length, + mediaType, + } +} + function buildCompletionSignal(input: { status: AsyncCompletionSignal['status'] message?: string @@ -163,6 +237,40 @@ export async function executeToolAndReport( message: 'Tool call not found', }) + const argsPayload = toolCall.params ? 
(() => { + try { + return JSON.stringify(toolCall.params) + } catch { + return undefined + } + })() : undefined + return withCopilotToolSpan( + { + toolName: toolCall.name, + toolCallId: toolCall.id, + runId: context.runId, + chatId: execContext.chatId, + argsBytes: argsPayload?.length, + argsPreview: argsPayload?.slice(0, 200), + }, + async (otelSpan) => { + const completion = await executeToolAndReportInner(toolCall, context, execContext, options) + otelSpan.setAttribute('tool.outcome', completion.status) + if (completion.message) { + otelSpan.setAttribute('tool.outcome.message', String(completion.message).slice(0, 500)) + } + return completion + }, + ) +} + +async function executeToolAndReportInner( + toolCall: ToolCallState, + context: StreamingContext, + execContext: ExecutionContext, + options?: OrchestratorOptions, +): Promise { + if (toolCall.status === 'executing') { return buildCompletionSignal({ status: MothershipStreamV1AsyncToolRecordStatus.running, @@ -376,6 +484,11 @@ export async function executeToolAndReport( endToolSpan('cancelled', { cancelReason: 'abort_during_post_processing_csv' }) return cancelledCompletion('Request aborted during tool post-processing') } + toolSpan.attributes = { + ...toolSpan.attributes, + ...summarizeToolResultForSpan(result), + } + setTerminalToolCallState(toolCall, { status: result.success ? 
MothershipStreamV1ToolOutcome.success diff --git a/apps/sim/lib/copilot/request/trace.ts b/apps/sim/lib/copilot/request/trace.ts index 8f74f743762..5a4c8dfd498 100644 --- a/apps/sim/lib/copilot/request/trace.ts +++ b/apps/sim/lib/copilot/request/trace.ts @@ -1,5 +1,6 @@ -import { createLogger } from '@sim/logger' -import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import type { Context } from "@opentelemetry/api"; +import { createLogger } from "@sim/logger"; +import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; import { type RequestTraceV1CostSummary, RequestTraceV1Outcome, @@ -8,24 +9,24 @@ import { RequestTraceV1SpanSource, RequestTraceV1SpanStatus, type RequestTraceV1UsageSummary, -} from '@/lib/copilot/generated/request-trace-v1' -import { env } from '@/lib/core/config/env' +} from "@/lib/copilot/generated/request-trace-v1"; +import { env } from "@/lib/core/config/env"; -const logger = createLogger('RequestTrace') +const logger = createLogger("RequestTrace"); export class TraceCollector { - private readonly spans: RequestTraceV1Span[] = [] - private readonly startMs = Date.now() - private goTraceId?: string - private activeSpan?: RequestTraceV1Span + private readonly spans: RequestTraceV1Span[] = []; + private readonly startMs = Date.now(); + private goTraceId?: string; + private activeSpan?: RequestTraceV1Span; startSpan( name: string, kind: string, attributes?: Record, - parent?: RequestTraceV1Span + parent?: RequestTraceV1Span, ): RequestTraceV1Span { - const startMs = Date.now() + const startMs = Date.now(); const span: RequestTraceV1Span = { name, kind, @@ -39,55 +40,57 @@ export class TraceCollector { : this.activeSpan ? { parentName: this.activeSpan.name } : {}), - ...(attributes && Object.keys(attributes).length > 0 ? { attributes } : {}), - } - this.spans.push(span) - return span + ...(attributes && Object.keys(attributes).length > 0 + ? 
{ attributes } + : {}), + }; + this.spans.push(span); + return span; } endSpan( span: RequestTraceV1Span, - status: RequestTraceV1SpanStatus | string = RequestTraceV1SpanStatus.ok + status: RequestTraceV1SpanStatus | string = RequestTraceV1SpanStatus.ok, ): void { - span.endMs = Date.now() - span.durationMs = span.endMs - span.startMs - span.status = status as RequestTraceV1SpanStatus + span.endMs = Date.now(); + span.durationMs = span.endMs - span.startMs; + span.status = status as RequestTraceV1SpanStatus; } setActiveSpan(span: RequestTraceV1Span | undefined): void { - this.activeSpan = span + this.activeSpan = span; } setGoTraceId(id: string): void { if (!this.goTraceId && id) { - this.goTraceId = id + this.goTraceId = id; } } build(params: { - outcome: RequestTraceV1Outcome - simRequestId: string - streamId?: string - chatId?: string - runId?: string - executionId?: string - usage?: { prompt: number; completion: number } - cost?: { input: number; output: number; total: number } + outcome: RequestTraceV1Outcome; + simRequestId: string; + streamId?: string; + chatId?: string; + runId?: string; + executionId?: string; + usage?: { prompt: number; completion: number }; + cost?: { input: number; output: number; total: number }; }): RequestTraceV1SimReport { - const endMs = Date.now() + const endMs = Date.now(); const usage: RequestTraceV1UsageSummary | undefined = params.usage ? { inputTokens: params.usage.prompt, outputTokens: params.usage.completion, } - : undefined + : undefined; const cost: RequestTraceV1CostSummary | undefined = params.cost ? 
{ rawTotalCost: params.cost.total, billedTotalCost: params.cost.total, } - : undefined + : undefined; return { simRequestId: params.simRequestId, @@ -103,26 +106,39 @@ export class TraceCollector { usage, cost, spans: this.spans, - } + }; } } -export async function reportTrace(trace: RequestTraceV1SimReport): Promise { - const response = await fetch(`${SIM_AGENT_API_URL}/api/traces`, { - method: 'POST', +export async function reportTrace( + trace: RequestTraceV1SimReport, + otelContext?: Context, +): Promise { + const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); + const body = JSON.stringify(trace); + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/traces`, { + method: "POST", headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + "Content-Type": "application/json", + ...(env.COPILOT_API_KEY ? { "x-api-key": env.COPILOT_API_KEY } : {}), + }, + body, + otelContext, + spanName: "sim → go /api/traces", + operation: "report_trace", + attributes: { + "copilot.request.id": trace.simRequestId ?? "", + "http.request.content_length": body.length, + "copilot.trace.span_count": trace.spans?.length ?? 
0, }, - body: JSON.stringify(trace), - }) + }); if (!response.ok) { - logger.warn('Failed to report trace', { + logger.warn("Failed to report trace", { status: response.status, simRequestId: trace.simRequestId, - }) + }); } } -export { RequestTraceV1Outcome, RequestTraceV1SpanStatus } +export { RequestTraceV1Outcome, RequestTraceV1SpanStatus }; diff --git a/apps/sim/lib/copilot/request/types.ts b/apps/sim/lib/copilot/request/types.ts index 87416c5e4f4..2964c9f354c 100644 --- a/apps/sim/lib/copilot/request/types.ts +++ b/apps/sim/lib/copilot/request/types.ts @@ -1,6 +1,7 @@ import type { AsyncCompletionSignal } from '@/lib/copilot/async-runs/lifecycle' import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1' import type { StreamEvent } from '@/lib/copilot/request/session' +import type { RequestTraceV1Span } from '@/lib/copilot/generated/request-trace-v1' import type { TraceCollector } from '@/lib/copilot/request/trace' import type { ToolExecutionContext, ToolExecutionResult } from '@/lib/copilot/tool-executor/types' @@ -99,6 +100,7 @@ export interface StreamingContext { edit?: Record } | null trace: TraceCollector + subAgentTraceSpans?: Map } export interface FileAttachment { diff --git a/apps/sim/lib/copilot/tools/handlers/vfs.test.ts b/apps/sim/lib/copilot/tools/handlers/vfs.test.ts index 7ec15c7d087..8cc02e65751 100644 --- a/apps/sim/lib/copilot/tools/handlers/vfs.test.ts +++ b/apps/sim/lib/copilot/tools/handlers/vfs.test.ts @@ -56,13 +56,14 @@ describe('vfs handlers oversize policy', () => { expect(result.error).toContain('context window') }) - it('fails oversized read results with grep guidance', async () => { + it('fails oversized read results from VFS with grep guidance', async () => { const vfs = makeVfs() + vfs.readFileContent.mockResolvedValue(null) vfs.read.mockReturnValue({ content: OVERSIZED_INLINE_CONTENT, totalLines: 1 }) getOrMaterializeVFS.mockResolvedValue(vfs) const result = await executeVfsRead( - { path: 
'files/big.txt' }, + { path: 'workflows/My Workflow/state.json' }, { userId: 'user-1', workflowId: 'wf-1', workspaceId: 'ws-1' } ) @@ -72,9 +73,8 @@ describe('vfs handlers oversize policy', () => { expect(result.error).toContain('context window') }) - it('fails file-backed oversized read placeholders with grep guidance', async () => { + it('fails file-backed oversized read placeholders with original message', async () => { const vfs = makeVfs() - vfs.read.mockReturnValue(null) vfs.readFileContent.mockResolvedValue({ content: '[File too large to display inline: big.txt (6000000 bytes, limit 5242880)]', totalLines: 1, @@ -87,8 +87,46 @@ describe('vfs handlers oversize policy', () => { ) expect(result.success).toBe(false) - expect(result.error).toContain('Use grep') - expect(result.error).toContain('offset/limit') - expect(result.error).toContain('context window') + expect(result.error).toContain('File too large to display inline') + expect(result.error).toContain('big.txt') + }) + + it('passes through image reads with attachment even when oversized', async () => { + const vfs = makeVfs() + const largeBase64 = 'A'.repeat(TOOL_RESULT_MAX_INLINE_CHARS + 1) + vfs.readFileContent.mockResolvedValue({ + content: 'Image: chess.png (500.0KB, image/png)', + totalLines: 1, + attachment: { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: largeBase64 }, + }, + }) + getOrMaterializeVFS.mockResolvedValue(vfs) + + const result = await executeVfsRead( + { path: 'files/chess.png' }, + { userId: 'user-1', workflowId: 'wf-1', workspaceId: 'ws-1' } + ) + + expect(result.success).toBe(true) + expect((result.output as { attachment?: { type: string } })?.attachment?.type).toBe('image') + }) + + it('fails oversized image placeholder when image exceeds size limit', async () => { + const vfs = makeVfs() + vfs.readFileContent.mockResolvedValue({ + content: '[Image too large: huge.png (10.0MB, limit 5MB)]', + totalLines: 1, + }) + 
getOrMaterializeVFS.mockResolvedValue(vfs) + + const result = await executeVfsRead( + { path: 'files/huge.png' }, + { userId: 'user-1', workflowId: 'wf-1', workspaceId: 'ws-1' } + ) + + expect(result.success).toBe(false) + expect(result.error).toContain('too large') }) }) diff --git a/apps/sim/lib/copilot/tools/handlers/vfs.ts b/apps/sim/lib/copilot/tools/handlers/vfs.ts index 67430f61fd2..35ab7b9c23b 100644 --- a/apps/sim/lib/copilot/tools/handlers/vfs.ts +++ b/apps/sim/lib/copilot/tools/handlers/vfs.ts @@ -160,21 +160,30 @@ export async function executeVfsRead( const filename = path.slice('uploads/'.length) const uploadResult = await readChatUpload(filename, context.chatId) if (uploadResult) { + const isImage = hasImageAttachment(uploadResult) if ( - !hasImageAttachment(uploadResult) && + !isImage && (isOversizedReadPlaceholder(uploadResult.content) || serializedResultSize(uploadResult) > TOOL_RESULT_MAX_INLINE_CHARS) ) { + logger.warn('Upload read result too large', { + path, + hasAttachment: isImage, + contentPreview: uploadResult.content.slice(0, 120), + serializedSize: serializedResultSize(uploadResult), + }) return { success: false, - error: - 'Read result too large to return inline. Use grep with a more specific pattern or narrower path to locate the relevant section, then retry read with offset/limit. Avoid catch-all greps or full-file reads because they waste context window.', + error: isOversizedReadPlaceholder(uploadResult.content) + ? uploadResult.content + : 'Read result too large to return inline. Use grep with a more specific pattern or narrower path to locate the relevant section, then retry read with offset/limit. 
Avoid catch-all greps or full-file reads because they waste context window.', } } const windowedUpload = applyWindow(uploadResult) logger.debug('vfs_read resolved chat upload', { path, totalLines: uploadResult.totalLines, + hasAttachment: isImage, offset, limit, }) @@ -187,34 +196,47 @@ export async function executeVfsRead( } const vfs = await getOrMaterializeVFS(workspaceId, context.userId) - const result = vfs.read(path, offset, limit) - if (!result) { - const fileContent = await vfs.readFileContent(path) - if (fileContent) { - if ( - !hasImageAttachment(fileContent) && - (isOversizedReadPlaceholder(fileContent.content) || - serializedResultSize(fileContent) > TOOL_RESULT_MAX_INLINE_CHARS) - ) { - return { - success: false, - error: - 'Read result too large to return inline. Use grep with a more specific pattern or narrower path to locate the relevant section, then retry read with offset/limit. Avoid catch-all greps or full-file reads because they waste context window.', - } - } - const windowedFileContent = applyWindow(fileContent) - logger.debug('vfs_read resolved workspace file', { + + // For workspace file paths (files/ or recently-deleted/files/), try readFileContent + // first so images, PDFs, and documents get proper attachment/parsing handling rather + // than being served as raw VFS metadata text. + const fileContent = await vfs.readFileContent(path) + if (fileContent) { + const isImage = hasImageAttachment(fileContent) + if ( + !isImage && + (isOversizedReadPlaceholder(fileContent.content) || + serializedResultSize(fileContent) > TOOL_RESULT_MAX_INLINE_CHARS) + ) { + logger.warn('File read result too large', { path, - totalLines: fileContent.totalLines, - offset, - limit, + hasAttachment: isImage, + contentPreview: fileContent.content.slice(0, 120), + serializedSize: serializedResultSize(fileContent), }) return { - success: true, - output: windowedFileContent, + success: false, + error: isOversizedReadPlaceholder(fileContent.content) + ? 
fileContent.content + : 'Read result too large to return inline. Use grep with a more specific pattern or narrower path to locate the relevant section, then retry read with offset/limit. Avoid catch-all greps or full-file reads because they waste context window.', } } + const windowedFileContent = applyWindow(fileContent) + logger.debug('vfs_read resolved workspace file', { + path, + totalLines: fileContent.totalLines, + hasAttachment: isImage, + offset, + limit, + }) + return { + success: true, + output: windowedFileContent, + } + } + const result = vfs.read(path, offset, limit) + if (!result) { const suggestions = vfs.suggestSimilar(path) logger.warn('vfs_read file not found', { path, suggestions }) const hint = diff --git a/apps/sim/lib/copilot/vfs/file-reader.test.ts b/apps/sim/lib/copilot/vfs/file-reader.test.ts new file mode 100644 index 00000000000..0efe948bf57 --- /dev/null +++ b/apps/sim/lib/copilot/vfs/file-reader.test.ts @@ -0,0 +1,92 @@ +/** + * @vitest-environment node + */ + +import { randomFillSync } from 'node:crypto' +import { loggerMock } from '@sim/testing' +import { describe, expect, it, vi } from 'vitest' + +const { downloadWorkspaceFile } = vi.hoisted(() => ({ + downloadWorkspaceFile: vi.fn(), +})) + +vi.mock('@sim/logger', () => loggerMock) +vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({ + downloadWorkspaceFile, +})) + +import { readFileRecord } from '@/lib/copilot/vfs/file-reader' + +const MAX_IMAGE_READ_BYTES = 5 * 1024 * 1024 + +async function makeNoisePng(width: number, height: number): Promise { + const sharp = (await import('sharp')).default + const raw = Buffer.alloc(width * height * 3) + randomFillSync(raw) + return sharp(raw, { raw: { width, height, channels: 3 } }).png().toBuffer() +} + +describe('readFileRecord', () => { + it('returns small images as attachments without resize note', async () => { + const sharp = (await import('sharp')).default + const smallPng = await sharp({ + create: { + width: 200, 
+ height: 200, + channels: 3, + background: { r: 255, g: 0, b: 0 }, + }, + }) + .png() + .toBuffer() + + downloadWorkspaceFile.mockResolvedValue(smallPng) + + const result = await readFileRecord({ + id: 'wf_small', + workspaceId: 'ws_1', + name: 'small.png', + key: 'uploads/small.png', + path: '/api/files/serve/uploads%2Fsmall.png?context=mothership', + size: smallPng.length, + type: 'image/png', + uploadedBy: 'user_1', + uploadedAt: new Date(), + deletedAt: null, + storageContext: 'mothership', + }) + + expect(result?.attachment?.type).toBe('image') + expect(result?.attachment?.source.media_type).toBe('image/png') + expect(result?.content).not.toContain('resized for vision') + expect(Buffer.from(result?.attachment?.source.data ?? '', 'base64')).toEqual(smallPng) + }) + + it('downscales oversized images into attachments that fit the read limit', async () => { + const largePng = await makeNoisePng(1800, 1800) + expect(largePng.length).toBeGreaterThan(MAX_IMAGE_READ_BYTES) + + downloadWorkspaceFile.mockResolvedValue(largePng) + + const result = await readFileRecord({ + id: 'wf_large', + workspaceId: 'ws_1', + name: 'chesspng.png', + key: 'uploads/chesspng.png', + path: '/api/files/serve/uploads%2Fchesspng.png?context=mothership', + size: largePng.length, + type: 'image/png', + uploadedBy: 'user_1', + uploadedAt: new Date(), + deletedAt: null, + storageContext: 'mothership', + }) + + expect(result?.attachment?.type).toBe('image') + expect(result?.content).toContain('resized for vision') + + const decoded = Buffer.from(result?.attachment?.source.data ?? 
'', 'base64') + expect(decoded.length).toBeLessThanOrEqual(MAX_IMAGE_READ_BYTES) + expect(result?.attachment?.source.media_type).toMatch(/^image\/(jpeg|webp|png)$/) + }) +}) diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index 00f2e2dc55e..faf08c3608d 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -7,6 +7,9 @@ const logger = createLogger('FileReader') const MAX_TEXT_READ_BYTES = 5 * 1024 * 1024 // 5 MB const MAX_IMAGE_READ_BYTES = 5 * 1024 * 1024 // 5 MB +const MAX_IMAGE_DIMENSION = 1568 +const IMAGE_RESIZE_DIMENSIONS = [1568, 1280, 1024, 768] +const IMAGE_QUALITY_STEPS = [85, 70, 55, 40] const TEXT_TYPES = new Set([ 'text/plain', @@ -41,6 +44,114 @@ function detectImageMime(buf: Buffer, claimed: string): string { return claimed } +interface PreparedVisionImage { + buffer: Buffer + mediaType: string + resized: boolean +} + +async function prepareImageForVision( + buffer: Buffer, + claimedType: string +): Promise { + const mediaType = detectImageMime(buffer, claimedType) + + let sharpModule: typeof import('sharp').default + try { + sharpModule = (await import('sharp')).default + } catch (err) { + logger.warn('Failed to load sharp for image preparation', { + mediaType, + error: err instanceof Error ? err.message : String(err), + }) + return buffer.length <= MAX_IMAGE_READ_BYTES ? { buffer, mediaType, resized: false } : null + } + + let metadata: Awaited['metadata']>> + try { + metadata = await sharpModule(buffer, { limitInputPixels: false }).metadata() + } catch (err) { + logger.warn('Failed to read image metadata for VFS read', { + mediaType, + error: err instanceof Error ? err.message : String(err), + }) + return buffer.length <= MAX_IMAGE_READ_BYTES ? { buffer, mediaType, resized: false } : null + } + + const width = metadata.width ?? 0 + const height = metadata.height ?? 
0 + const needsResize = + buffer.length > MAX_IMAGE_READ_BYTES || + width > MAX_IMAGE_DIMENSION || + height > MAX_IMAGE_DIMENSION + if (!needsResize) { + return { buffer, mediaType, resized: false } + } + + const hasAlpha = Boolean( + metadata.hasAlpha || + mediaType === 'image/png' || + mediaType === 'image/webp' || + mediaType === 'image/gif' + ) + + for (const dimension of IMAGE_RESIZE_DIMENSIONS) { + for (const quality of IMAGE_QUALITY_STEPS) { + try { + const pipeline = sharpModule(buffer, { limitInputPixels: false }) + .rotate() + .resize({ + width: dimension, + height: dimension, + fit: 'inside', + withoutEnlargement: true, + }) + + const transformed = hasAlpha + ? { + buffer: await pipeline + .webp({ quality, alphaQuality: quality, effort: 4 }) + .toBuffer(), + mediaType: 'image/webp', + } + : { + buffer: await pipeline + .jpeg({ quality, mozjpeg: true, chromaSubsampling: '4:4:4' }) + .toBuffer(), + mediaType: 'image/jpeg', + } + + if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { + logger.info('Resized image for VFS read', { + originalBytes: buffer.length, + outputBytes: transformed.buffer.length, + originalWidth: width || undefined, + originalHeight: height || undefined, + maxDimension: dimension, + quality, + originalMediaType: mediaType, + outputMediaType: transformed.mediaType, + }) + return { + buffer: transformed.buffer, + mediaType: transformed.mediaType, + resized: true, + } + } + } catch (err) { + logger.warn('Failed image resize attempt for VFS read', { + mediaType, + dimension, + quality, + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + } + + return null +} + export interface FileReadResult { content: string totalLines: number @@ -61,27 +172,29 @@ export interface FileReadResult { */ export async function readFileRecord(record: WorkspaceFileRecord): Promise { try { - if (isImageFileType(record.type)) { - if (record.size > MAX_IMAGE_READ_BYTES) { - return { - content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB)]`, - totalLines: 1, - } - } - const buffer = await downloadWorkspaceFile(record) - const mime = detectImageMime(buffer, record.type) - return { - content: `Image: ${record.name} (${(record.size / 1024).toFixed(1)}KB, ${mime})`, - totalLines: 1, - attachment: { - type: 'image', - source: { - type: 'base64', - media_type: mime, - data: buffer.toString('base64'), - }, - }, - } + if (isImageFileType(record.type)) { + const originalBuffer = await downloadWorkspaceFile(record) + const prepared = await prepareImageForVision(originalBuffer, record.type) + if (!prepared) { + return { + content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB after resize/compression)]`, + totalLines: 1, + } + } + const sizeKb = (prepared.buffer.length / 1024).toFixed(1) + const resizeNote = prepared.resized ? 
', resized for vision' : '' + return { + content: `Image: ${record.name} (${sizeKb}KB, ${prepared.mediaType}${resizeNote})`, + totalLines: 1, + attachment: { + type: 'image', + source: { + type: 'base64', + media_type: prepared.mediaType, + data: prepared.buffer.toString('base64'), + }, + }, + } } if (isReadableType(record.type)) { diff --git a/apps/sim/providers/models.ts b/apps/sim/providers/models.ts index 61b073e9dcc..49cf756f43a 100644 --- a/apps/sim/providers/models.ts +++ b/apps/sim/providers/models.ts @@ -519,6 +519,26 @@ export const PROVIDER_DEFINITIONS: Record = { toolUsageControl: true, }, models: [ + { + id: 'claude-opus-4-7', + pricing: { + input: 5.0, + cachedInput: 0.5, + output: 25.0, + updatedAt: '2026-04-16', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: 128000, + thinking: { + levels: ['low', 'medium', 'high', 'max'], + default: 'high', + }, + }, + contextWindow: 1000000, + releaseDate: '2026-04-16', + }, { id: 'claude-opus-4-6', pricing: { From 8aa203cf7dab09859196ffea0a4b567184011438 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 16 Apr 2026 14:37:44 -0700 Subject: [PATCH 02/10] Tracing v2 --- apps/sim/app/api/billing/update-cost/route.ts | 47 + .../api/copilot/api-keys/validate/route.ts | 117 +- apps/sim/app/api/copilot/chat/stream/route.ts | 106 +- apps/sim/instrumentation-node.ts | 36 +- apps/sim/lib/copilot/async-runs/repository.ts | 407 ++++--- apps/sim/lib/copilot/chat/post.ts | 1071 ++++++++++------- apps/sim/lib/copilot/chat/terminal-state.ts | 87 +- .../lib/copilot/generated/trace-spans-v1.ts | 107 ++ .../sim/lib/copilot/request/go/propagation.ts | 31 + .../lib/copilot/request/lifecycle/start.ts | 88 +- apps/sim/lib/copilot/request/otel.ts | 237 +++- .../lib/copilot/request/session/recovery.ts | 130 +- apps/sim/lib/copilot/request/subagent.ts | 38 + apps/sim/lib/copilot/request/tools/files.ts | 129 +- .../lib/copilot/request/tools/resources.ts | 137 ++- 
apps/sim/lib/copilot/request/tools/tables.ts | 417 ++++--- apps/sim/lib/copilot/vfs/file-reader.ts | 455 ++++--- package.json | 6 +- scripts/sync-trace-spans-contract.ts | 155 +++ 19 files changed, 2581 insertions(+), 1220 deletions(-) create mode 100644 apps/sim/lib/copilot/generated/trace-spans-v1.ts create mode 100644 scripts/sync-trace-spans-contract.ts diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index f01ec13f939..c406c5981d8 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -4,7 +4,9 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { recordUsage } from '@/lib/billing/core/usage-log' import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' +import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { isBillingEnabled } from '@/lib/core/config/feature-flags' import { type AtomicClaimResult, billingIdempotency } from '@/lib/core/idempotency/service' import { generateRequestId } from '@/lib/core/utils/request' @@ -26,8 +28,28 @@ const UpdateCostSchema = z.object({ /** * POST /api/billing/update-cost * Update user cost with a pre-calculated cost value (internal API key auth required) + * + * Parented under the Go-side `sim.update_cost` span via W3C traceparent + * propagation. Every mothership request that bills should therefore show + * the Go client span AND this Sim server span sharing one trace, with + * the actual usage/overage work nested below. 
*/ export async function POST(req: NextRequest) { + return withIncomingGoSpan( + req.headers, + TraceSpan.CopilotBillingUpdateCost, + { + 'http.method': 'POST', + 'http.route': '/api/billing/update-cost', + }, + async (span) => updateCostInner(req, span), + ) +} + +async function updateCostInner( + req: NextRequest, + span: import('@opentelemetry/api').Span, +): Promise { const requestId = generateRequestId() const startTime = Date.now() let claim: AtomicClaimResult | null = null @@ -37,6 +59,8 @@ export async function POST(req: NextRequest) { logger.info(`[${requestId}] Update cost request started`) if (!isBillingEnabled) { + span.setAttribute('billing.outcome', 'billing_disabled') + span.setAttribute('http.status_code', 200) return NextResponse.json({ success: true, message: 'Billing disabled, cost update skipped', @@ -52,6 +76,8 @@ export async function POST(req: NextRequest) { const authResult = checkInternalApiKey(req) if (!authResult.success) { logger.warn(`[${requestId}] Authentication failed: ${authResult.error}`) + span.setAttribute('billing.outcome', 'auth_failed') + span.setAttribute('http.status_code', 401) return NextResponse.json( { success: false, @@ -69,6 +95,8 @@ export async function POST(req: NextRequest) { errors: validation.error.issues, body, }) + span.setAttribute('billing.outcome', 'invalid_body') + span.setAttribute('http.status_code', 400) return NextResponse.json( { success: false, @@ -83,6 +111,17 @@ export async function POST(req: NextRequest) { validation.data const isMcp = source === 'mcp_copilot' + span.setAttributes({ + 'user.id': userId, + 'gen_ai.request.model': model, + 'billing.source': source, + 'billing.cost_usd': cost, + 'gen_ai.usage.input_tokens': inputTokens, + 'gen_ai.usage.output_tokens': outputTokens, + 'billing.is_mcp': isMcp, + ...(idempotencyKey ? { 'billing.idempotency_key': idempotencyKey } : {}), + }) + claim = idempotencyKey ? 
await billingIdempotency.atomicallyClaim('update-cost', idempotencyKey) : null @@ -93,6 +132,8 @@ export async function POST(req: NextRequest) { userId, source, }) + span.setAttribute('billing.outcome', 'duplicate_idempotency_key') + span.setAttribute('http.status_code', 409) return NextResponse.json( { success: false, @@ -157,6 +198,9 @@ export async function POST(req: NextRequest) { cost, }) + span.setAttribute('billing.outcome', 'billed') + span.setAttribute('http.status_code', 200) + span.setAttribute('billing.duration_ms', duration) return NextResponse.json({ success: true, data: { @@ -191,6 +235,9 @@ export async function POST(req: NextRequest) { ) } + span.setAttribute('billing.outcome', 'internal_error') + span.setAttribute('http.status_code', 500) + span.setAttribute('billing.duration_ms', duration) return NextResponse.json( { success: false, diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index 1c1df540132..bd9d751819d 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -5,7 +5,9 @@ import { eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' +import { withIncomingGoSpan } from '@/lib/copilot/request/otel' const logger = createLogger('CopilotApiKeysValidate') @@ -14,54 +16,83 @@ const ValidateApiKeySchema = z.object({ }) export async function POST(req: NextRequest) { - try { - const auth = checkInternalApiKey(req) - if (!auth.success) { - return new NextResponse(null, { status: 401 }) - } + // Incoming-from-Go: extracts traceparent so this handler's work shows + // up as a child of the Go-side `sim.validate_api_key` span in the same + // 
trace. If there's no traceparent (manual curl / browser), the helper + // falls back to a new root span. + return withIncomingGoSpan( + req.headers, + TraceSpan.CopilotAuthValidateApiKey, + { + 'http.method': 'POST', + 'http.route': '/api/copilot/api-keys/validate', + }, + async (span) => { + try { + const auth = checkInternalApiKey(req) + if (!auth.success) { + span.setAttribute('copilot.validate.outcome', 'internal_auth_failed') + span.setAttribute('http.status_code', 401) + return new NextResponse(null, { status: 401 }) + } - const body = await req.json().catch(() => null) + const body = await req.json().catch(() => null) + const validationResult = ValidateApiKeySchema.safeParse(body) + if (!validationResult.success) { + logger.warn('Invalid validation request', { errors: validationResult.error.errors }) + span.setAttribute('copilot.validate.outcome', 'invalid_body') + span.setAttribute('http.status_code', 400) + return NextResponse.json( + { + error: 'userId is required', + details: validationResult.error.errors, + }, + { status: 400 } + ) + } - const validationResult = ValidateApiKeySchema.safeParse(body) + const { userId } = validationResult.data + span.setAttribute('user.id', userId) - if (!validationResult.success) { - logger.warn('Invalid validation request', { errors: validationResult.error.errors }) - return NextResponse.json( - { - error: 'userId is required', - details: validationResult.error.errors, - }, - { status: 400 } - ) - } + const [existingUser] = await db.select().from(user).where(eq(user.id, userId)).limit(1) + if (!existingUser) { + logger.warn('[API VALIDATION] userId does not exist', { userId }) + span.setAttribute('copilot.validate.outcome', 'user_not_found') + span.setAttribute('http.status_code', 403) + return NextResponse.json({ error: 'User not found' }, { status: 403 }) + } - const { userId } = validationResult.data + logger.info('[API VALIDATION] Validating usage limit', { userId }) + const { isExceeded, currentUsage, limit } = 
await checkServerSideUsageLimits(userId) + span.setAttributes({ + 'billing.usage.current': currentUsage, + 'billing.usage.limit': limit, + 'billing.usage.exceeded': isExceeded, + }) - const [existingUser] = await db.select().from(user).where(eq(user.id, userId)).limit(1) - if (!existingUser) { - logger.warn('[API VALIDATION] userId does not exist', { userId }) - return NextResponse.json({ error: 'User not found' }, { status: 403 }) - } + logger.info('[API VALIDATION] Usage limit validated', { + userId, + currentUsage, + limit, + isExceeded, + }) - logger.info('[API VALIDATION] Validating usage limit', { userId }) + if (isExceeded) { + logger.info('[API VALIDATION] Usage exceeded', { userId, currentUsage, limit }) + span.setAttribute('copilot.validate.outcome', 'usage_exceeded') + span.setAttribute('http.status_code', 402) + return new NextResponse(null, { status: 402 }) + } - const { isExceeded, currentUsage, limit } = await checkServerSideUsageLimits(userId) - - logger.info('[API VALIDATION] Usage limit validated', { - userId, - currentUsage, - limit, - isExceeded, - }) - - if (isExceeded) { - logger.info('[API VALIDATION] Usage exceeded', { userId, currentUsage, limit }) - return new NextResponse(null, { status: 402 }) - } - - return new NextResponse(null, { status: 200 }) - } catch (error) { - logger.error('Error validating usage limit', { error }) - return NextResponse.json({ error: 'Failed to validate usage' }, { status: 500 }) - } + span.setAttribute('copilot.validate.outcome', 'ok') + span.setAttribute('http.status_code', 200) + return new NextResponse(null, { status: 200 }) + } catch (error) { + logger.error('Error validating usage limit', { error }) + span.setAttribute('copilot.validate.outcome', 'internal_error') + span.setAttribute('http.status_code', 500) + return NextResponse.json({ error: 'Failed to validate usage' }, { status: 500 }) + } + }, + ) } diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts 
b/apps/sim/app/api/copilot/chat/stream/route.ts index ad6593cf381..c8a34f160a6 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -1,3 +1,8 @@ +import { + context as otelContext, + SpanStatusCode, + trace, +} from "@opentelemetry/api"; import { createLogger } from "@sim/logger"; import { type NextRequest, NextResponse } from "next/server"; import { getLatestRunForStream } from "@/lib/copilot/async-runs/repository"; @@ -6,6 +11,7 @@ import { MothershipStreamV1EventType, } from "@/lib/copilot/generated/mothership-stream-v1"; import { authenticateCopilotRequestSessionOnly } from "@/lib/copilot/request/http"; +import { getCopilotTracer } from "@/lib/copilot/request/otel"; import { checkForReplayGap, createEvent, @@ -127,6 +133,64 @@ export async function GET(request: NextRequest) { ); } + // Root span for the whole resume/reconnect request. In stream mode the + // work happens inside `ReadableStream.start`, which the Node runtime + // invokes after this function returns and OUTSIDE the AsyncLocalStorage + // scope installed by `startActiveSpan`. We therefore start the span + // manually, capture its context, and re-enter that context inside the + // stream callback so every nested `withCopilotSpan` / `withDbSpan` call + // attaches to this root. + const rootSpan = getCopilotTracer().startSpan("copilot.resume.request", { + attributes: { + "copilot.transport": batchMode ? "batch" : "stream", + "stream.id": streamId, + "user.id": authenticatedUserId, + "copilot.resume.after_cursor": afterCursor || "0", + }, + }); + const rootContext = trace.setSpan(otelContext.active(), rootSpan); + + try { + return await otelContext.with(rootContext, () => + handleResumeRequestBody({ + request, + streamId, + afterCursor, + batchMode, + authenticatedUserId, + rootSpan, + rootContext, + }), + ); + } catch (err) { + rootSpan.setStatus({ + code: SpanStatusCode.ERROR, + message: err instanceof Error ? 
err.message : String(err), + }); + rootSpan.recordException(err instanceof Error ? err : new Error(String(err))); + rootSpan.end(); + throw err; + } +} + +async function handleResumeRequestBody({ + request, + streamId, + afterCursor, + batchMode, + authenticatedUserId, + rootSpan, + rootContext, +}: { + request: NextRequest; + streamId: string; + afterCursor: string; + batchMode: boolean; + authenticatedUserId: string; + rootSpan: import("@opentelemetry/api").Span; + rootContext: import("@opentelemetry/api").Context; +}) { + const run = await getLatestRunForStream(streamId, authenticatedUserId).catch( (err) => { logger.warn("Failed to fetch latest run for stream", { @@ -144,8 +208,11 @@ export async function GET(request: NextRequest) { runStatus: run?.status, }); if (!run) { + rootSpan.setAttribute("copilot.resume.outcome", "stream_not_found"); + rootSpan.end(); return NextResponse.json({ error: "Stream not found" }, { status: 404 }); } + rootSpan.setAttribute("copilot.run.status", run.status); if (batchMode) { const afterSeq = afterCursor || "0"; @@ -167,6 +234,12 @@ export async function GET(request: NextRequest) { previewSessionCount: previewSessions.length, runStatus: run.status, }); + rootSpan.setAttributes({ + "copilot.resume.outcome": "batch_delivered", + "copilot.resume.event_count": batchEvents.length, + "copilot.resume.preview_session_count": previewSessions.length, + }); + rootSpan.end(); return NextResponse.json({ success: true, events: batchEvents, @@ -176,9 +249,19 @@ export async function GET(request: NextRequest) { } const startTime = Date.now(); + let totalEventsFlushed = 0; + let pollIterations = 0; const stream = new ReadableStream({ async start(controller) { + // Re-enter the root OTel context so any `withCopilotSpan` call below + // (inside flushEvents/checkForReplayGap/etc.) parents under + // copilot.resume.request instead of becoming an orphan. 
+ return otelContext.with(rootContext, () => startInner(controller)); + }, + }); + + async function startInner(controller: ReadableStreamDefaultController) { let cursor = afterCursor || "0"; let controllerClosed = false; let sawTerminalEvent = false; @@ -213,6 +296,7 @@ export async function GET(request: NextRequest) { const flushEvents = async () => { const events = await readEvents(streamId, cursor); if (events.length > 0) { + totalEventsFlushed += events.length; logger.info("[Resume] Flushing events", { streamId, afterCursor: cursor, @@ -274,6 +358,7 @@ export async function GET(request: NextRequest) { await flushEvents(); while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) { + pollIterations += 1; const currentRun = await getLatestRunForStream( streamId, authenticatedUserId, @@ -342,12 +427,29 @@ export async function GET(request: NextRequest) { reason: "stream_replay_failed", }); } + rootSpan.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + rootSpan.recordException( + error instanceof Error ? error : new Error(String(error)), + ); } finally { request.signal.removeEventListener("abort", abortListener); closeController(); + rootSpan.setAttributes({ + "copilot.resume.outcome": sawTerminalEvent + ? "terminal_delivered" + : controllerClosed + ? "client_disconnected" + : "ended_without_terminal", + "copilot.resume.event_count": totalEventsFlushed, + "copilot.resume.poll_iterations": pollIterations, + "copilot.resume.duration_ms": Date.now() - startTime, + }); + rootSpan.end(); } - }, - }); + } return new Response(stream, { headers: SSE_RESPONSE_HEADERS }); } diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index ff89d5b5eea..936aaaac46c 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -49,25 +49,23 @@ const DEFAULT_TELEMETRY_CONFIG = { } /** - * Span name prefixes we keep after sampling. 
All spans we care about - * (copilot lifecycle, fetchGo Sim→Go calls, gen_ai.* root, workflow/block - * executions, etc.) start with one of these. Anything else is Next.js - * framework noise and gets dropped unless its parent is already sampled. + * Span name prefixes we keep after sampling. + * + * Scope: this process only traces *mothership / copilot* requests for now. + * Anything outside that lifecycle (workflow executor, block runtime, + * Next.js framework noise, etc.) is intentionally dropped so Jaeger only + * shows the Sim half of a mothership trace. + * + * Any new prefix here should correspond to a span our copilot code + * explicitly creates; adding a broad prefix (e.g. `http.`) risks + * silently re-enabling non-copilot tracing. */ const ALLOWED_SPAN_PREFIXES = [ - 'platform.', 'gen_ai.', - 'workflow.', - 'block.', - 'http.client.', - 'function.', - 'router.', - 'condition.', - 'loop.', - 'parallel.', 'copilot.', 'sim →', 'sim.', + 'tool.execute', ] function isBusinessSpan(spanName: string): boolean { @@ -227,12 +225,24 @@ async function initializeOpenTelemetry() { exportTimeoutMillis: telemetryConfig.batchSettings.exportTimeoutMillis, }) + // service.instance.id identifies this specific process within the + // shared `mothership` service. Jaeger's clock-skew adjuster groups + // spans by (service, instance) — without a unique instance per + // origin, Sim and Go spans fall into the same group, Jaeger sees + // multi-second cross-machine clock drift within one group, and its + // adjuster emits spurious "parent is not in the trace; skipping + // clock skew adjustment" warnings on every cross-process child. + // Stable per-origin instance ID (`mothership-sim` / `mothership-go`) + // is enough to split the groups cleanly; Jaeger still shows both + // under the single `mothership` service in its service picker. 
+ const serviceInstanceId = `${telemetryConfig.serviceName}-${MOTHERSHIP_ORIGIN}` const resource = defaultResource().merge( resourceFromAttributes({ [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, [ATTR_SERVICE_VERSION]: telemetryConfig.serviceVersion, [ATTR_DEPLOYMENT_ENVIRONMENT]: env.NODE_ENV || 'development', 'service.namespace': 'mothership', + 'service.instance.id': serviceInstanceId, 'mothership.origin': MOTHERSHIP_ORIGIN, 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.language': 'nodejs', diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index 9fd082e72d7..6c572e14344 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -152,33 +152,58 @@ export async function updateRunStatus( } export async function getLatestRunForExecution(executionId: string) { - const [run] = await db - .select() - .from(copilotRuns) - .where(eq(copilotRuns.executionId, executionId)) - .orderBy(desc(copilotRuns.startedAt)) - .limit(1) - - return run ?? null + return withDbSpan( + 'copilot.async_runs.get_latest_for_execution', + 'SELECT', + 'copilot_runs', + { 'copilot.execution_id': executionId }, + async () => { + const [run] = await db + .select() + .from(copilotRuns) + .where(eq(copilotRuns.executionId, executionId)) + .orderBy(desc(copilotRuns.startedAt)) + .limit(1) + return run ?? null + }, + ) } export async function getLatestRunForStream(streamId: string, userId?: string) { - const conditions = userId - ? and(eq(copilotRuns.streamId, streamId), eq(copilotRuns.userId, userId)) - : eq(copilotRuns.streamId, streamId) - const [run] = await db - .select() - .from(copilotRuns) - .where(conditions) - .orderBy(desc(copilotRuns.startedAt)) - .limit(1) - - return run ?? 
null + return withDbSpan( + 'copilot.async_runs.get_latest_for_stream', + 'SELECT', + 'copilot_runs', + { + 'copilot.stream_id': streamId, + 'copilot.user_id': userId, + }, + async () => { + const conditions = userId + ? and(eq(copilotRuns.streamId, streamId), eq(copilotRuns.userId, userId)) + : eq(copilotRuns.streamId, streamId) + const [run] = await db + .select() + .from(copilotRuns) + .where(conditions) + .orderBy(desc(copilotRuns.startedAt)) + .limit(1) + return run ?? null + }, + ) } export async function getRunSegment(runId: string) { - const [run] = await db.select().from(copilotRuns).where(eq(copilotRuns.id, runId)).limit(1) - return run ?? null + return withDbSpan( + 'copilot.async_runs.get_run_segment', + 'SELECT', + 'copilot_runs', + { 'copilot.run.id': runId }, + async () => { + const [run] = await db.select().from(copilotRuns).where(eq(copilotRuns.id, runId)).limit(1) + return run ?? null + }, + ) } export async function createRunCheckpoint(input: { @@ -188,18 +213,29 @@ export async function createRunCheckpoint(input: { agentState: Record providerRequest: Record }) { - const [checkpoint] = await db - .insert(copilotRunCheckpoints) - .values({ - runId: input.runId, - pendingToolCallId: input.pendingToolCallId, - conversationSnapshot: input.conversationSnapshot, - agentState: input.agentState, - providerRequest: input.providerRequest, - }) - .returning() + return withDbSpan( + 'copilot.async_runs.create_run_checkpoint', + 'INSERT', + 'copilot_run_checkpoints', + { + 'copilot.run.id': input.runId, + 'copilot.checkpoint.pending_tool_call_id': input.pendingToolCallId, + }, + async () => { + const [checkpoint] = await db + .insert(copilotRunCheckpoints) + .values({ + runId: input.runId, + pendingToolCallId: input.pendingToolCallId, + conversationSnapshot: input.conversationSnapshot, + agentState: input.agentState, + providerRequest: input.providerRequest, + }) + .returning() - return checkpoint + return checkpoint + }, + ) } export async function 
upsertAsyncToolCall(input: { @@ -210,67 +246,87 @@ export async function upsertAsyncToolCall(input: { args?: Record status?: CopilotAsyncToolStatus }) { - const existing = await getAsyncToolCall(input.toolCallId) - const incomingStatus = input.status ?? 'pending' - if ( - existing && - (isTerminalAsyncStatus(existing.status) || isDeliveredAsyncStatus(existing.status)) && - !isTerminalAsyncStatus(incomingStatus) && - !isDeliveredAsyncStatus(incomingStatus) - ) { - logger.info('Ignoring async tool upsert that would downgrade terminal state', { - toolCallId: input.toolCallId, - existingStatus: existing.status, - incomingStatus, - }) - return existing - } - const effectiveRunId = input.runId ?? existing?.runId ?? null - if (!effectiveRunId) { - logger.warn('upsertAsyncToolCall missing runId and no existing row', { - toolCallId: input.toolCallId, - toolName: input.toolName, - status: input.status ?? 'pending', - }) - return null - } + return withDbSpan( + 'copilot.async_runs.upsert_async_tool_call', + 'UPSERT', + 'copilot_async_tool_calls', + { + 'tool.call_id': input.toolCallId, + 'tool.name': input.toolName, + 'copilot.async_tool.status': input.status ?? 'pending', + 'copilot.run.id': input.runId ?? undefined, + }, + async () => { + const existing = await getAsyncToolCall(input.toolCallId) + const incomingStatus = input.status ?? 'pending' + if ( + existing && + (isTerminalAsyncStatus(existing.status) || isDeliveredAsyncStatus(existing.status)) && + !isTerminalAsyncStatus(incomingStatus) && + !isDeliveredAsyncStatus(incomingStatus) + ) { + logger.info('Ignoring async tool upsert that would downgrade terminal state', { + toolCallId: input.toolCallId, + existingStatus: existing.status, + incomingStatus, + }) + return existing + } + const effectiveRunId = input.runId ?? existing?.runId ?? 
null + if (!effectiveRunId) { + logger.warn('upsertAsyncToolCall missing runId and no existing row', { + toolCallId: input.toolCallId, + toolName: input.toolName, + status: input.status ?? 'pending', + }) + return null + } - const now = new Date() - const [row] = await db - .insert(copilotAsyncToolCalls) - .values({ - runId: effectiveRunId, - checkpointId: input.checkpointId ?? null, - toolCallId: input.toolCallId, - toolName: input.toolName, - args: input.args ?? {}, - status: incomingStatus, - updatedAt: now, - }) - .onConflictDoUpdate({ - target: copilotAsyncToolCalls.toolCallId, - set: { - runId: effectiveRunId, - checkpointId: input.checkpointId ?? null, - toolName: input.toolName, - args: input.args ?? {}, - status: incomingStatus, - updatedAt: now, - }, - }) - .returning() + const now = new Date() + const [row] = await db + .insert(copilotAsyncToolCalls) + .values({ + runId: effectiveRunId, + checkpointId: input.checkpointId ?? null, + toolCallId: input.toolCallId, + toolName: input.toolName, + args: input.args ?? {}, + status: incomingStatus, + updatedAt: now, + }) + .onConflictDoUpdate({ + target: copilotAsyncToolCalls.toolCallId, + set: { + runId: effectiveRunId, + checkpointId: input.checkpointId ?? null, + toolName: input.toolName, + args: input.args ?? {}, + status: incomingStatus, + updatedAt: now, + }, + }) + .returning() - return row + return row + }, + ) } export async function getAsyncToolCall(toolCallId: string) { - const [row] = await db - .select() - .from(copilotAsyncToolCalls) - .where(eq(copilotAsyncToolCalls.toolCallId, toolCallId)) - .limit(1) - - return row ?? null + return withDbSpan( + 'copilot.async_runs.get_async_tool_call', + 'SELECT', + 'copilot_async_tool_calls', + { 'tool.call_id': toolCallId }, + async () => { + const [row] = await db + .select() + .from(copilotAsyncToolCalls) + .where(eq(copilotAsyncToolCalls.toolCallId, toolCallId)) + .limit(1) + return row ?? 
null + }, + ) } export async function markAsyncToolStatus( @@ -284,28 +340,41 @@ export async function markAsyncToolStatus( completedAt?: Date | null } = {} ) { - const claimedAt = - updates.claimedAt !== undefined - ? updates.claimedAt - : status === 'running' && updates.claimedBy - ? new Date() - : undefined - - const [row] = await db - .update(copilotAsyncToolCalls) - .set({ - status, - claimedBy: updates.claimedBy, - claimedAt, - result: updates.result, - error: updates.error, - completedAt: updates.completedAt, - updatedAt: new Date(), - }) - .where(eq(copilotAsyncToolCalls.toolCallId, toolCallId)) - .returning() + return withDbSpan( + 'copilot.async_runs.mark_async_tool_status', + 'UPDATE', + 'copilot_async_tool_calls', + { + 'tool.call_id': toolCallId, + 'copilot.async_tool.status': status, + 'copilot.async_tool.has_error': !!updates.error, + 'copilot.async_tool.claimed_by': updates.claimedBy ?? undefined, + }, + async () => { + const claimedAt = + updates.claimedAt !== undefined + ? updates.claimedAt + : status === 'running' && updates.claimedBy + ? new Date() + : undefined + + const [row] = await db + .update(copilotAsyncToolCalls) + .set({ + status, + claimedBy: updates.claimedBy, + claimedAt, + result: updates.result, + error: updates.error, + completedAt: updates.completedAt, + updatedAt: new Date(), + }) + .where(eq(copilotAsyncToolCalls.toolCallId, toolCallId)) + .returning() - return row ?? null + return row ?? 
null + }, + ) } export async function markAsyncToolRunning(toolCallId: string, claimedBy: string) { @@ -349,57 +418,91 @@ export async function markAsyncToolDelivered(toolCallId: string) { } export async function listAsyncToolCallsForRun(runId: string) { - return db - .select() - .from(copilotAsyncToolCalls) - .where(eq(copilotAsyncToolCalls.runId, runId)) - .orderBy(desc(copilotAsyncToolCalls.createdAt)) + return withDbSpan( + 'copilot.async_runs.list_for_run', + 'SELECT', + 'copilot_async_tool_calls', + { 'copilot.run.id': runId }, + async () => + db + .select() + .from(copilotAsyncToolCalls) + .where(eq(copilotAsyncToolCalls.runId, runId)) + .orderBy(desc(copilotAsyncToolCalls.createdAt)), + ) } export async function getAsyncToolCalls(toolCallIds: string[]) { if (toolCallIds.length === 0) return [] - return db - .select() - .from(copilotAsyncToolCalls) - .where(inArray(copilotAsyncToolCalls.toolCallId, toolCallIds)) + return withDbSpan( + 'copilot.async_runs.get_many', + 'SELECT', + 'copilot_async_tool_calls', + { 'copilot.async_tool.ids_count': toolCallIds.length }, + async () => + db + .select() + .from(copilotAsyncToolCalls) + .where(inArray(copilotAsyncToolCalls.toolCallId, toolCallIds)), + ) } export async function claimCompletedAsyncToolCall(toolCallId: string, workerId: string) { - const [row] = await db - .update(copilotAsyncToolCalls) - .set({ - claimedBy: workerId, - claimedAt: new Date(), - updatedAt: new Date(), - }) - .where( - and( - eq(copilotAsyncToolCalls.toolCallId, toolCallId), - inArray(copilotAsyncToolCalls.status, ['completed', 'failed', 'cancelled']), - isNull(copilotAsyncToolCalls.claimedBy) - ) - ) - .returning() - - return row ?? 
null + return withDbSpan( + 'copilot.async_runs.claim_completed', + 'UPDATE', + 'copilot_async_tool_calls', + { + 'tool.call_id': toolCallId, + 'copilot.async_tool.worker_id': workerId, + }, + async () => { + const [row] = await db + .update(copilotAsyncToolCalls) + .set({ + claimedBy: workerId, + claimedAt: new Date(), + updatedAt: new Date(), + }) + .where( + and( + eq(copilotAsyncToolCalls.toolCallId, toolCallId), + inArray(copilotAsyncToolCalls.status, ['completed', 'failed', 'cancelled']), + isNull(copilotAsyncToolCalls.claimedBy) + ) + ) + .returning() + return row ?? null + }, + ) } export async function releaseCompletedAsyncToolClaim(toolCallId: string, workerId: string) { - const [row] = await db - .update(copilotAsyncToolCalls) - .set({ - claimedBy: null, - claimedAt: null, - updatedAt: new Date(), - }) - .where( - and( - eq(copilotAsyncToolCalls.toolCallId, toolCallId), - inArray(copilotAsyncToolCalls.status, ['completed', 'failed', 'cancelled']), - eq(copilotAsyncToolCalls.claimedBy, workerId) - ) - ) - .returning() - - return row ?? null + return withDbSpan( + 'copilot.async_runs.release_claim', + 'UPDATE', + 'copilot_async_tool_calls', + { + 'tool.call_id': toolCallId, + 'copilot.async_tool.worker_id': workerId, + }, + async () => { + const [row] = await db + .update(copilotAsyncToolCalls) + .set({ + claimedBy: null, + claimedAt: null, + updatedAt: new Date(), + }) + .where( + and( + eq(copilotAsyncToolCalls.toolCallId, toolCallId), + inArray(copilotAsyncToolCalls.status, ['completed', 'failed', 'cancelled']), + eq(copilotAsyncToolCalls.claimedBy, workerId) + ) + ) + .returning() + return row ?? 
null + }, + ) } diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index 8581621d1f2..ca1aec8d7c5 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -1,47 +1,68 @@ -import { db } from '@sim/db' -import { copilotChats } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { eq, sql } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { getSession } from '@/lib/auth' -import { type ChatLoadResult, resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle' -import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload' +import { db } from "@sim/db"; +import { copilotChats } from "@sim/db/schema"; +import { createLogger } from "@sim/logger"; +import { eq, sql } from "drizzle-orm"; +import { type NextRequest, NextResponse } from "next/server"; +import { z } from "zod"; +import { getSession } from "@/lib/auth"; +import { + type ChatLoadResult, + resolveOrCreateChat, +} from "@/lib/copilot/chat/lifecycle"; +import { buildCopilotRequestPayload } from "@/lib/copilot/chat/payload"; import { buildPersistedAssistantMessage, buildPersistedUserMessage, -} from '@/lib/copilot/chat/persisted-message' +} from "@/lib/copilot/chat/persisted-message"; import { processContextsServer, resolveActiveResourceContext, -} from '@/lib/copilot/chat/process-contents' -import { finalizeAssistantTurn } from '@/lib/copilot/chat/terminal-state' -import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' -import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants' +} from "@/lib/copilot/chat/process-contents"; +import { + context as otelContextApi, + type Context as OtelContext, +} from "@opentelemetry/api"; +import { finalizeAssistantTurn } from "@/lib/copilot/chat/terminal-state"; +import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; +import { TraceSpan } from 
"@/lib/copilot/generated/trace-spans-v1"; +import { + startCopilotOtelRoot, + withCopilotSpan, +} from "@/lib/copilot/request/otel"; +import { COPILOT_REQUEST_MODES } from "@/lib/copilot/constants"; import { createBadRequestResponse, createRequestTracker, createUnauthorizedResponse, -} from '@/lib/copilot/request/http' -import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start' +} from "@/lib/copilot/request/http"; +import { + createSSEStream, + SSE_RESPONSE_HEADERS, +} from "@/lib/copilot/request/lifecycle/start"; import { acquirePendingChatStream, getPendingChatStreamId, releasePendingChatStream, -} from '@/lib/copilot/request/session' -import type { ExecutionContext, OrchestratorResult } from '@/lib/copilot/request/types' -import { persistChatResources } from '@/lib/copilot/resources/persistence' -import { taskPubSub } from '@/lib/copilot/tasks' -import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils' -import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils' -import type { ChatContext } from '@/stores/panel' +} from "@/lib/copilot/request/session"; +import type { + ExecutionContext, + OrchestratorResult, +} from "@/lib/copilot/request/types"; +import { persistChatResources } from "@/lib/copilot/resources/persistence"; +import { taskPubSub } from "@/lib/copilot/tasks"; +import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; +import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; +import { + getWorkflowById, + resolveWorkflowIdForUser, +} from "@/lib/workflows/utils"; +import { getUserEntityPermissions } from "@/lib/workspaces/permissions/utils"; +import type { ChatContext } from "@/stores/panel"; -export const maxDuration = 3600 +export const maxDuration = 3600; -const logger = 
createLogger('UnifiedChatAPI') -const DEFAULT_MODEL = 'claude-opus-4-6' +const logger = createLogger("UnifiedChatAPI"); +const DEFAULT_MODEL = "claude-opus-4-6"; const FileAttachmentSchema = z.object({ id: z.string(), @@ -49,40 +70,52 @@ const FileAttachmentSchema = z.object({ filename: z.string(), media_type: z.string(), size: z.number(), -}) +}); const ResourceAttachmentSchema = z.object({ - type: z.enum(['workflow', 'table', 'file', 'knowledgebase', 'folder', 'task', 'log', 'generic']), + type: z.enum([ + "workflow", + "table", + "file", + "knowledgebase", + "folder", + "task", + "log", + "generic", + ]), id: z.string().min(1), title: z.string().optional(), active: z.boolean().optional(), -}) - -const GENERIC_RESOURCE_TITLE: Record['type'], string> = { - workflow: 'Workflow', - table: 'Table', - file: 'File', - knowledgebase: 'Knowledge Base', - folder: 'Folder', - task: 'Task', - log: 'Log', - generic: 'Resource', -} +}); + +const GENERIC_RESOURCE_TITLE: Record< + z.infer["type"], + string +> = { + workflow: "Workflow", + table: "Table", + file: "File", + knowledgebase: "Knowledge Base", + folder: "Folder", + task: "Task", + log: "Log", + generic: "Resource", +}; const ChatContextSchema = z.object({ kind: z.enum([ - 'past_chat', - 'workflow', - 'current_workflow', - 'blocks', - 'logs', - 'workflow_block', - 'knowledge', - 'templates', - 'docs', - 'table', - 'file', - 'folder', + "past_chat", + "workflow", + "current_workflow", + "blocks", + "logs", + "workflow_block", + "knowledge", + "templates", + "docs", + "table", + "file", + "folder", ]), label: z.string(), chatId: z.string().optional(), @@ -95,17 +128,17 @@ const ChatContextSchema = z.object({ tableId: z.string().optional(), fileId: z.string().optional(), folderId: z.string().optional(), -}) +}); const ChatMessageSchema = z.object({ - message: z.string().min(1, 'Message is required'), + message: z.string().min(1, "Message is required"), userMessageId: z.string().optional(), chatId: z.string().optional(), 
workflowId: z.string().optional(), workspaceId: z.string().optional(), workflowName: z.string().optional(), model: z.string().optional().default(DEFAULT_MODEL), - mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), + mode: z.enum(COPILOT_REQUEST_MODES).optional().default("agent"), prefetch: z.boolean().optional(), createNewChat: z.boolean().optional().default(false), implicitFeedback: z.string().optional(), @@ -115,98 +148,106 @@ const ChatMessageSchema = z.object({ contexts: z.array(ChatContextSchema).optional(), commands: z.array(z.string()).optional(), userTimezone: z.string().optional(), -}) +}); -type UnifiedChatRequest = z.infer +type UnifiedChatRequest = z.infer; type UnifiedChatBranch = | { - kind: 'workflow' - workflowId: string - workflowName?: string - workspaceId?: string - selectedModel: string - mode: UnifiedChatRequest['mode'] - provider?: string - goRoute: '/api/copilot' - titleModel: string - titleProvider?: string - notifyWorkspaceStatus: false + kind: "workflow"; + workflowId: string; + workflowName?: string; + workspaceId?: string; + selectedModel: string; + mode: UnifiedChatRequest["mode"]; + provider?: string; + goRoute: "/api/copilot"; + titleModel: string; + titleProvider?: string; + notifyWorkspaceStatus: false; buildPayload: (params: { - message: string - userId: string - userMessageId: string - chatId?: string - contexts: Array<{ type: string; content: string }> - fileAttachments?: UnifiedChatRequest['fileAttachments'] - userPermission?: string - userTimezone?: string - workflowId: string - workflowName?: string - workspaceId?: string - mode: UnifiedChatRequest['mode'] - provider?: string - commands?: string[] - prefetch?: boolean - implicitFeedback?: string - }) => Promise> + message: string; + userId: string; + userMessageId: string; + chatId?: string; + contexts: Array<{ type: string; content: string }>; + fileAttachments?: UnifiedChatRequest["fileAttachments"]; + userPermission?: string; + userTimezone?: string; + 
workflowId: string; + workflowName?: string; + workspaceId?: string; + mode: UnifiedChatRequest["mode"]; + provider?: string; + commands?: string[]; + prefetch?: boolean; + implicitFeedback?: string; + }) => Promise>; buildExecutionContext: (params: { - userId: string - chatId?: string - userTimezone?: string - messageId: string - }) => Promise + userId: string; + chatId?: string; + userTimezone?: string; + messageId: string; + }) => Promise; } | { - kind: 'workspace' - workspaceId: string - goRoute: '/api/mothership' - titleModel: string - titleProvider?: undefined - notifyWorkspaceStatus: true + kind: "workspace"; + workspaceId: string; + goRoute: "/api/mothership"; + titleModel: string; + titleProvider?: undefined; + notifyWorkspaceStatus: true; buildPayload: (params: { - message: string - userId: string - userMessageId: string - chatId?: string - contexts: Array<{ type: string; content: string }> - fileAttachments?: UnifiedChatRequest['fileAttachments'] - userPermission?: string - userTimezone?: string - workspaceContext?: string - }) => Promise> + message: string; + userId: string; + userMessageId: string; + chatId?: string; + contexts: Array<{ type: string; content: string }>; + fileAttachments?: UnifiedChatRequest["fileAttachments"]; + userPermission?: string; + userTimezone?: string; + workspaceContext?: string; + }) => Promise>; buildExecutionContext: (params: { - userId: string - chatId?: string - userTimezone?: string - messageId: string - }) => Promise - } - -function normalizeContexts(contexts: UnifiedChatRequest['contexts']) { + userId: string; + chatId?: string; + userTimezone?: string; + messageId: string; + }) => Promise; + }; + +function normalizeContexts(contexts: UnifiedChatRequest["contexts"]) { if (!Array.isArray(contexts)) { - return contexts + return contexts; } return contexts.map((ctx) => { - if (ctx.kind !== 'blocks') return ctx - if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx - if (ctx.blockId) return { ...ctx, 
blockIds: [ctx.blockId] } - return ctx - }) + if (ctx.kind !== "blocks") return ctx; + if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx; + if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] }; + return ctx; + }); } async function resolveAgentContexts(params: { - contexts?: UnifiedChatRequest['contexts'] - resourceAttachments?: UnifiedChatRequest['resourceAttachments'] - userId: string - message: string - workspaceId?: string - chatId?: string - requestId: string + contexts?: UnifiedChatRequest["contexts"]; + resourceAttachments?: UnifiedChatRequest["resourceAttachments"]; + userId: string; + message: string; + workspaceId?: string; + chatId?: string; + requestId: string; }): Promise> { - const { contexts, resourceAttachments, userId, message, workspaceId, chatId, requestId } = params + const { + contexts, + resourceAttachments, + userId, + message, + workspaceId, + chatId, + requestId, + } = params; - let agentContexts: Array<{ type: string; content: string }> = [] + let agentContexts: Array<{ type: string; content: string }> = []; if (Array.isArray(contexts) && contexts.length > 0) { try { @@ -215,14 +256,18 @@ async function resolveAgentContexts(params: { userId, message, workspaceId, - chatId - ) + chatId, + ); } catch (error) { - logger.error(`[${requestId}] Failed to process contexts`, error) + logger.error(`[${requestId}] Failed to process contexts`, error); } } - if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0 && workspaceId) { + if ( + Array.isArray(resourceAttachments) && + resourceAttachments.length > 0 && + workspaceId + ) { const results = await Promise.allSettled( resourceAttachments.map(async (resource) => { const ctx = await resolveActiveResourceContext( @@ -230,33 +275,45 @@ async function resolveAgentContexts(params: { resource.id, workspaceId, userId, - chatId - ) - if (!ctx) return null - return { ...ctx, tag: resource.active ? 
'@active_tab' : '@open_tab' } - }) - ) + chatId, + ); + if (!ctx) return null; + return { ...ctx, tag: resource.active ? "@active_tab" : "@open_tab" }; + }), + ); for (const result of results) { - if (result.status === 'fulfilled' && result.value) { - agentContexts.push(result.value) - } else if (result.status === 'rejected') { - logger.error(`[${requestId}] Failed to resolve resource attachment`, result.reason) + if (result.status === "fulfilled" && result.value) { + agentContexts.push(result.value); + } else if (result.status === "rejected") { + logger.error( + `[${requestId}] Failed to resolve resource attachment`, + result.reason, + ); } } } - return agentContexts + return agentContexts; } async function persistUserMessage(params: { - chatId?: string - userMessageId: string - message: string - fileAttachments?: UnifiedChatRequest['fileAttachments'] - contexts?: UnifiedChatRequest['contexts'] - workspaceId?: string - notifyWorkspaceStatus: boolean + chatId?: string; + userMessageId: string; + message: string; + fileAttachments?: UnifiedChatRequest["fileAttachments"]; + contexts?: UnifiedChatRequest["contexts"]; + workspaceId?: string; + notifyWorkspaceStatus: boolean; + /** + * Root context for the mothership request. When present the persist + * span is created explicitly under it, which avoids relying on + * AsyncLocalStorage propagation — some upstream awaits (Next.js + * framework frames, Turbopack-instrumented I/O) can swap the active + * store out from under us in dev, which would otherwise leave this + * span parented to the about-to-be-dropped Next.js HTTP span. 
+ */ + parentOtelContext?: OtelContext; }): Promise { const { chatId, @@ -266,59 +323,96 @@ async function persistUserMessage(params: { contexts, workspaceId, notifyWorkspaceStatus, - } = params - if (!chatId) return undefined - - const userMsg = buildPersistedUserMessage({ - id: userMessageId, - content: message, - fileAttachments, - contexts, - }) - - const [updated] = await db - .update(copilotChats) - .set({ - messages: sql`${copilotChats.messages} || ${JSON.stringify([userMsg])}::jsonb`, - conversationId: userMessageId, - updatedAt: new Date(), - }) - .where(eq(copilotChats.id, chatId)) - .returning({ messages: copilotChats.messages }) - - if (notifyWorkspaceStatus && updated && workspaceId) { - taskPubSub?.publishStatusChanged({ workspaceId, chatId, type: 'started' }) - } + parentOtelContext, + } = params; + if (!chatId) return undefined; + + return withCopilotSpan( + TraceSpan.CopilotChatPersistUserMessage, + { + "db.system": "postgresql", + "db.sql.table": "copilot_chats", + "chat.id": chatId, + "chat.user_message_id": userMessageId, + "chat.message_bytes": message.length, + "chat.file_attachment_count": fileAttachments?.length ?? 0, + "chat.context_count": contexts?.length ?? 0, + ...(workspaceId ? { "workspace.id": workspaceId } : {}), + }, + async (span) => { + const userMsg = buildPersistedUserMessage({ + id: userMessageId, + content: message, + fileAttachments, + contexts, + }); + + const [updated] = await db + .update(copilotChats) + .set({ + messages: sql`${copilotChats.messages} || ${JSON.stringify([userMsg])}::jsonb`, + conversationId: userMessageId, + updatedAt: new Date(), + }) + .where(eq(copilotChats.id, chatId)) + .returning({ messages: copilotChats.messages }); + + const messagesAfter = Array.isArray(updated?.messages) + ? updated.messages + : undefined; + span.setAttributes({ + "chat.persist.outcome": updated ? "appended" : "chat_not_found", + "chat.messages_after": messagesAfter?.length ?? 
0, + }); + + if (notifyWorkspaceStatus && updated && workspaceId) { + taskPubSub?.publishStatusChanged({ + workspaceId, + chatId, + type: "started", + }); + } - return Array.isArray(updated?.messages) ? updated.messages : undefined + return messagesAfter; + }, + parentOtelContext, + ); } async function buildInitialExecutionContext(params: { - userId: string - workflowId?: string - workspaceId?: string - chatId?: string - messageId: string - userTimezone?: string - requestMode: string + userId: string; + workflowId?: string; + workspaceId?: string; + chatId?: string; + messageId: string; + userTimezone?: string; + requestMode: string; }): Promise { - const { userId, workflowId, workspaceId, chatId, messageId, userTimezone, requestMode } = params + const { + userId, + workflowId, + workspaceId, + chatId, + messageId, + userTimezone, + requestMode, + } = params; if (workflowId && !workspaceId) { - const context = await prepareExecutionContext(userId, workflowId, chatId) + const context = await prepareExecutionContext(userId, workflowId, chatId); return { ...context, messageId, userTimezone, requestMode, copilotToolExecution: true, - } + }; } - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId); return { userId, - workflowId: workflowId ?? '', + workflowId: workflowId ?? 
"", workspaceId, chatId, decryptedEnvVars, @@ -326,85 +420,102 @@ async function buildInitialExecutionContext(params: { userTimezone, requestMode, copilotToolExecution: true, - } + }; } function buildOnComplete(params: { - chatId?: string - userMessageId: string - requestId: string - workspaceId?: string - notifyWorkspaceStatus: boolean + chatId?: string; + userMessageId: string; + requestId: string; + workspaceId?: string; + notifyWorkspaceStatus: boolean; }) { - const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus } = params + const { + chatId, + userMessageId, + requestId, + workspaceId, + notifyWorkspaceStatus, + } = params; return async (result: OrchestratorResult) => { - if (!chatId) return + if (!chatId) return; try { await finalizeAssistantTurn({ chatId, userMessageId, ...(result.success - ? { assistantMessage: buildPersistedAssistantMessage(result, requestId) } + ? { + assistantMessage: buildPersistedAssistantMessage( + result, + requestId, + ), + } : {}), - }) + }); if (notifyWorkspaceStatus && workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: 'completed', - }) + type: "completed", + }); } } catch (error) { logger.error(`[${requestId}] Failed to persist chat messages`, { chatId, - error: error instanceof Error ? error.message : 'Unknown error', - }) + error: error instanceof Error ? 
error.message : "Unknown error", + }); } - } + }; } function buildOnError(params: { - chatId?: string - userMessageId: string - requestId: string - workspaceId?: string - notifyWorkspaceStatus: boolean + chatId?: string; + userMessageId: string; + requestId: string; + workspaceId?: string; + notifyWorkspaceStatus: boolean; }) { - const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus } = params + const { + chatId, + userMessageId, + requestId, + workspaceId, + notifyWorkspaceStatus, + } = params; return async () => { - if (!chatId) return + if (!chatId) return; try { - await finalizeAssistantTurn({ chatId, userMessageId }) + await finalizeAssistantTurn({ chatId, userMessageId }); if (notifyWorkspaceStatus && workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: 'completed', - }) + type: "completed", + }); } } catch (error) { logger.error(`[${requestId}] Failed to finalize errored chat stream`, { chatId, - error: error instanceof Error ? error.message : 'Unknown error', - }) + error: error instanceof Error ? 
error.message : "Unknown error", + }); } - } + }; } async function resolveBranch(params: { - authenticatedUserId: string - workflowId?: string - workflowName?: string - workspaceId?: string - model?: string - mode?: UnifiedChatRequest['mode'] - provider?: string + authenticatedUserId: string; + workflowId?: string; + workflowName?: string; + workspaceId?: string; + model?: string; + mode?: UnifiedChatRequest["mode"]; + provider?: string; }): Promise { const { authenticatedUserId, @@ -414,40 +525,40 @@ async function resolveBranch(params: { model, mode, provider, - } = params + } = params; if (providedWorkflowId || workflowName) { const resolved = await resolveWorkflowIdForUser( authenticatedUserId, providedWorkflowId, workflowName, - requestedWorkspaceId - ) - if (resolved.status !== 'resolved') { - return createBadRequestResponse(resolved.message) + requestedWorkspaceId, + ); + if (resolved.status !== "resolved") { + return createBadRequestResponse(resolved.message); } - const resolvedWorkflowId = resolved.workflowId - let resolvedWorkspaceId = requestedWorkspaceId + const resolvedWorkflowId = resolved.workflowId; + let resolvedWorkspaceId = requestedWorkspaceId; if (!resolvedWorkspaceId) { try { - const workflow = await getWorkflowById(resolvedWorkflowId) - resolvedWorkspaceId = workflow?.workspaceId ?? undefined + const workflow = await getWorkflowById(resolvedWorkflowId); + resolvedWorkspaceId = workflow?.workspaceId ?? undefined; } catch { // best effort; downstream calls can still proceed } } - const selectedModel = model || DEFAULT_MODEL + const selectedModel = model || DEFAULT_MODEL; return { - kind: 'workflow', + kind: "workflow", workflowId: resolvedWorkflowId, workflowName: resolved.workflowName, workspaceId: resolvedWorkspaceId, selectedModel, - mode: mode ?? 'agent', + mode: mode ?? 
"agent", provider, - goRoute: '/api/copilot', + goRoute: "/api/copilot", titleModel: selectedModel, titleProvider: provider, notifyWorkspaceStatus: false, @@ -460,7 +571,7 @@ async function resolveBranch(params: { workspaceId: payloadParams.workspaceId, userId: payloadParams.userId, userMessageId: payloadParams.userMessageId, - mode: payloadParams.mode ?? 'agent', + mode: payloadParams.mode ?? "agent", model: selectedModel, provider: payloadParams.provider, contexts: payloadParams.contexts, @@ -472,9 +583,14 @@ async function resolveBranch(params: { userPermission: payloadParams.userPermission, userTimezone: payloadParams.userTimezone, }, - { selectedModel } + { selectedModel }, ), - buildExecutionContext: async ({ userId, chatId, userTimezone, messageId }) => + buildExecutionContext: async ({ + userId, + chatId, + userTimezone, + messageId, + }) => buildInitialExecutionContext({ userId, workflowId: resolvedWorkflowId, @@ -482,19 +598,21 @@ async function resolveBranch(params: { chatId, messageId, userTimezone, - requestMode: mode ?? 'agent', + requestMode: mode ?? 
"agent", }), - } + }; } if (!requestedWorkspaceId) { - return createBadRequestResponse('workspaceId is required when workflowId is not provided') + return createBadRequestResponse( + "workspaceId is required when workflowId is not provided", + ); } return { - kind: 'workspace', + kind: "workspace", workspaceId: requestedWorkspaceId, - goRoute: '/api/mothership', + goRoute: "/api/mothership", titleModel: DEFAULT_MODEL, notifyWorkspaceStatus: true, buildPayload: async (payloadParams) => @@ -504,8 +622,8 @@ async function resolveBranch(params: { workspaceId: requestedWorkspaceId, userId: payloadParams.userId, userMessageId: payloadParams.userMessageId, - mode: 'agent', - model: '', + mode: "agent", + model: "", contexts: payloadParams.contexts, fileAttachments: payloadParams.fileAttachments, chatId: payloadParams.chatId, @@ -513,254 +631,315 @@ async function resolveBranch(params: { userPermission: payloadParams.userPermission, userTimezone: payloadParams.userTimezone, }, - { selectedModel: '' } + { selectedModel: "" }, ), - buildExecutionContext: async ({ userId, chatId, userTimezone, messageId }) => + buildExecutionContext: async ({ + userId, + chatId, + userTimezone, + messageId, + }) => buildInitialExecutionContext({ userId, workspaceId: requestedWorkspaceId, chatId, messageId, userTimezone, - requestMode: 'agent', + requestMode: "agent", }), - } + }; } export async function handleUnifiedChatPost(req: NextRequest) { - const tracker = createRequestTracker(false) - let actualChatId: string | undefined - let userMessageId = '' - let chatStreamLockAcquired = false + const tracker = createRequestTracker(false); + let actualChatId: string | undefined; + let userMessageId = ""; + let chatStreamLockAcquired = false; + // Started once we know the streamId (= userMessageId). Every subsequent + // span (persistUserMessage, createRunSegment, the whole SSE stream, etc.) 
+ // nests under this root via AsyncLocalStorage / explicit propagation, + // and the stream's terminal code path calls finish() when the request + // actually ends. Errors thrown from the handler before the stream + // starts are finished here in the catch below. + let otelRoot: ReturnType | undefined; + const executionId = crypto.randomUUID(); + const runId = crypto.randomUUID(); try { - const session = await getSession() + const session = await getSession(); if (!session?.user?.id) { - return createUnauthorizedResponse() + return createUnauthorizedResponse(); } - const authenticatedUserId = session.user.id + const authenticatedUserId = session.user.id; - const body = ChatMessageSchema.parse(await req.json()) - const normalizedContexts = normalizeContexts(body.contexts) - userMessageId = body.userMessageId || crypto.randomUUID() - - const branch = await resolveBranch({ - authenticatedUserId, - workflowId: body.workflowId, - workflowName: body.workflowName, - workspaceId: body.workspaceId, - model: body.model, - mode: body.mode, - provider: body.provider, - }) - if (branch instanceof NextResponse) { - return branch - } + const body = ChatMessageSchema.parse(await req.json()); + const normalizedContexts = normalizeContexts(body.contexts); + userMessageId = body.userMessageId || crypto.randomUUID(); - let currentChat: ChatLoadResult['chat'] = null - let conversationHistory: unknown[] = [] - let chatIsNew = false - actualChatId = body.chatId + otelRoot = startCopilotOtelRoot({ + requestId: tracker.requestId, + streamId: userMessageId, + executionId, + runId, + transport: "stream", + }); + + // Wrap the rest of the handler so every nested withCopilotSpan / + // withDbSpan (persistUserMessage, createRunSegment, resolveBranch DB + // hits) attaches to the root via AsyncLocalStorage. Before this + // wrapper those spans became orphan roots and each showed up as a + // separate trace in Jaeger. 
+ return await otelContextApi.with(otelRoot.context, async () => { + const branch = await resolveBranch({ + authenticatedUserId, + workflowId: body.workflowId, + workflowName: body.workflowName, + workspaceId: body.workspaceId, + model: body.model, + mode: body.mode, + provider: body.provider, + }); + if (branch instanceof NextResponse) { + return branch; + } - if (body.chatId || body.createNewChat) { - const chatResult = await resolveOrCreateChat({ - chatId: body.chatId, - userId: authenticatedUserId, - ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), - workspaceId: branch.workspaceId, - model: branch.titleModel, - type: branch.kind === 'workflow' ? 'copilot' : 'mothership', - }) - currentChat = chatResult.chat - actualChatId = chatResult.chatId || body.chatId - chatIsNew = chatResult.isNew - conversationHistory = Array.isArray(chatResult.conversationHistory) - ? chatResult.conversationHistory - : [] - - if (body.chatId && !currentChat) { - return NextResponse.json({ error: 'Chat not found' }, { status: 404 }) + let currentChat: ChatLoadResult["chat"] = null; + let conversationHistory: unknown[] = []; + let chatIsNew = false; + actualChatId = body.chatId; + + if (body.chatId || body.createNewChat) { + const chatResult = await resolveOrCreateChat({ + chatId: body.chatId, + userId: authenticatedUserId, + ...(branch.kind === "workflow" + ? { workflowId: branch.workflowId } + : {}), + workspaceId: branch.workspaceId, + model: branch.titleModel, + type: branch.kind === "workflow" ? "copilot" : "mothership", + }); + currentChat = chatResult.chat; + actualChatId = chatResult.chatId || body.chatId; + chatIsNew = chatResult.isNew; + conversationHistory = Array.isArray(chatResult.conversationHistory) + ? 
chatResult.conversationHistory + : []; + + if (body.chatId && !currentChat) { + return NextResponse.json( + { error: "Chat not found" }, + { status: 404 }, + ); + } } - } - if (chatIsNew && actualChatId && body.resourceAttachments?.length) { - await persistChatResources( - actualChatId, - body.resourceAttachments.map((r) => ({ - type: r.type, - id: r.id, - title: r.title ?? GENERIC_RESOURCE_TITLE[r.type], - })) - ) - } + if (chatIsNew && actualChatId && body.resourceAttachments?.length) { + await persistChatResources( + actualChatId, + body.resourceAttachments.map((r) => ({ + type: r.type, + id: r.id, + title: r.title ?? GENERIC_RESOURCE_TITLE[r.type], + })), + ); + } - if (actualChatId) { - chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId) - if (!chatStreamLockAcquired) { - const activeStreamId = await getPendingChatStreamId(actualChatId) - return NextResponse.json( - { - error: 'A response is already in progress for this chat.', - ...(activeStreamId ? { activeStreamId } : {}), - }, - { status: 409 } - ) + if (actualChatId) { + chatStreamLockAcquired = await acquirePendingChatStream( + actualChatId, + userMessageId, + ); + if (!chatStreamLockAcquired) { + const activeStreamId = await getPendingChatStreamId(actualChatId); + return NextResponse.json( + { + error: "A response is already in progress for this chat.", + ...(activeStreamId ? { activeStreamId } : {}), + }, + { status: 409 }, + ); + } } - } - const workspaceId = branch.workspaceId - const userPermissionPromise = workspaceId - ? getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch((error) => { - logger.warn('Failed to load user permissions', { - error: error instanceof Error ? error.message : String(error), + const workspaceId = branch.workspaceId; + const userPermissionPromise = workspaceId + ? 
getUserEntityPermissions( + authenticatedUserId, + "workspace", workspaceId, + ).catch((error) => { + logger.warn("Failed to load user permissions", { + error: error instanceof Error ? error.message : String(error), + workspaceId, + }); + return null; }) - return null - }) - : Promise.resolve(null) - const workspaceContextPromise = - branch.kind === 'workspace' - ? generateWorkspaceContext(branch.workspaceId, authenticatedUserId) - : Promise.resolve(undefined) - const agentContextsPromise = resolveAgentContexts({ - contexts: normalizedContexts, - resourceAttachments: body.resourceAttachments, - userId: authenticatedUserId, - message: body.message, - workspaceId, - chatId: actualChatId, - requestId: tracker.requestId, - }) - const persistedMessagesPromise = persistUserMessage({ - chatId: actualChatId, - userMessageId, - message: body.message, - fileAttachments: body.fileAttachments, - contexts: normalizedContexts, - workspaceId, - notifyWorkspaceStatus: branch.notifyWorkspaceStatus, - }) - const executionContextPromise = branch.buildExecutionContext({ - userId: authenticatedUserId, - chatId: actualChatId, - userTimezone: body.userTimezone, - messageId: userMessageId, - }) - - const [agentContexts, userPermission, workspaceContext, persistedMessages, executionContext] = - await Promise.all([ + : Promise.resolve(null); + const workspaceContextPromise = + branch.kind === "workspace" + ? 
generateWorkspaceContext(branch.workspaceId, authenticatedUserId) + : Promise.resolve(undefined); + const agentContextsPromise = resolveAgentContexts({ + contexts: normalizedContexts, + resourceAttachments: body.resourceAttachments, + userId: authenticatedUserId, + message: body.message, + workspaceId, + chatId: actualChatId, + requestId: tracker.requestId, + }); + const persistedMessagesPromise = persistUserMessage({ + chatId: actualChatId, + userMessageId, + message: body.message, + fileAttachments: body.fileAttachments, + contexts: normalizedContexts, + workspaceId, + notifyWorkspaceStatus: branch.notifyWorkspaceStatus, + parentOtelContext: otelRoot!.context, + }); + const executionContextPromise = branch.buildExecutionContext({ + userId: authenticatedUserId, + chatId: actualChatId, + userTimezone: body.userTimezone, + messageId: userMessageId, + }); + + const [ + agentContexts, + userPermission, + workspaceContext, + persistedMessages, + executionContext, + ] = await Promise.all([ agentContextsPromise, userPermissionPromise, workspaceContextPromise, persistedMessagesPromise, executionContextPromise, - ]) - - if (persistedMessages) { - conversationHistory = persistedMessages.filter((message) => { - const record = message as Record - return record.id !== userMessageId - }) - } + ]); - const requestPayload = - branch.kind === 'workflow' - ? await branch.buildPayload({ - message: body.message, - userId: authenticatedUserId, - userMessageId, - chatId: actualChatId, - contexts: agentContexts, - fileAttachments: body.fileAttachments, - userPermission: userPermission ?? 
undefined, - userTimezone: body.userTimezone, - workflowId: branch.workflowId, - workflowName: branch.workflowName, - workspaceId: branch.workspaceId, - mode: branch.mode, - provider: branch.provider, - commands: body.commands, - prefetch: body.prefetch, - implicitFeedback: body.implicitFeedback, - }) - : await branch.buildPayload({ - message: body.message, - userId: authenticatedUserId, - userMessageId, - chatId: actualChatId, - contexts: agentContexts, - fileAttachments: body.fileAttachments, - userPermission: userPermission ?? undefined, - userTimezone: body.userTimezone, - workspaceContext, - }) + if (persistedMessages) { + conversationHistory = persistedMessages.filter((message) => { + const record = message as Record; + return record.id !== userMessageId; + }); + } - const executionId = crypto.randomUUID() - const runId = crypto.randomUUID() + const requestPayload = + branch.kind === "workflow" + ? await branch.buildPayload({ + message: body.message, + userId: authenticatedUserId, + userMessageId, + chatId: actualChatId, + contexts: agentContexts, + fileAttachments: body.fileAttachments, + userPermission: userPermission ?? undefined, + userTimezone: body.userTimezone, + workflowId: branch.workflowId, + workflowName: branch.workflowName, + workspaceId: branch.workspaceId, + mode: branch.mode, + provider: branch.provider, + commands: body.commands, + prefetch: body.prefetch, + implicitFeedback: body.implicitFeedback, + }) + : await branch.buildPayload({ + message: body.message, + userId: authenticatedUserId, + userMessageId, + chatId: actualChatId, + contexts: agentContexts, + fileAttachments: body.fileAttachments, + userPermission: userPermission ?? 
undefined, + userTimezone: body.userTimezone, + workspaceContext, + }); + + if (actualChatId) { + otelRoot!.span.setAttribute("chat.id", actualChatId); + } + if (workspaceId) { + otelRoot!.span.setAttribute("workspace.id", workspaceId); + } - const stream = createSSEStream({ - requestPayload, - userId: authenticatedUserId, - streamId: userMessageId, - executionId, - runId, - chatId: actualChatId, - currentChat, - isNewChat: conversationHistory.length === 0, - message: body.message, - titleModel: branch.titleModel, - ...(branch.titleProvider ? { titleProvider: branch.titleProvider } : {}), - requestId: tracker.requestId, - workspaceId, - orchestrateOptions: { + const stream = createSSEStream({ + requestPayload, userId: authenticatedUserId, - ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), - ...(branch.kind === 'workspace' ? { workspaceId: branch.workspaceId } : {}), - chatId: actualChatId, + streamId: userMessageId, executionId, runId, - goRoute: branch.goRoute, - autoExecuteTools: true, - interactive: true, - executionContext, - onComplete: buildOnComplete({ - chatId: actualChatId, - userMessageId, - requestId: tracker.requestId, - workspaceId, - notifyWorkspaceStatus: branch.notifyWorkspaceStatus, - }), - onError: buildOnError({ + chatId: actualChatId, + currentChat, + isNewChat: conversationHistory.length === 0, + message: body.message, + titleModel: branch.titleModel, + ...(branch.titleProvider + ? { titleProvider: branch.titleProvider } + : {}), + requestId: tracker.requestId, + workspaceId, + otelRoot: otelRoot!, + orchestrateOptions: { + userId: authenticatedUserId, + ...(branch.kind === "workflow" + ? { workflowId: branch.workflowId } + : {}), + ...(branch.kind === "workspace" + ? 
{ workspaceId: branch.workspaceId } + : {}), chatId: actualChatId, - userMessageId, - requestId: tracker.requestId, - workspaceId, - notifyWorkspaceStatus: branch.notifyWorkspaceStatus, - }), - }, - }) + executionId, + runId, + goRoute: branch.goRoute, + autoExecuteTools: true, + interactive: true, + executionContext, + onComplete: buildOnComplete({ + chatId: actualChatId, + userMessageId, + requestId: tracker.requestId, + workspaceId, + notifyWorkspaceStatus: branch.notifyWorkspaceStatus, + }), + onError: buildOnError({ + chatId: actualChatId, + userMessageId, + requestId: tracker.requestId, + workspaceId, + notifyWorkspaceStatus: branch.notifyWorkspaceStatus, + }), + }, + }); - return new Response(stream, { headers: SSE_RESPONSE_HEADERS }) + return new Response(stream, { headers: SSE_RESPONSE_HEADERS }); + }); // end otelContextApi.with } catch (error) { if (chatStreamLockAcquired && actualChatId && userMessageId) { - await releasePendingChatStream(actualChatId, userMessageId) + await releasePendingChatStream(actualChatId, userMessageId); } + otelRoot?.finish("error", error); if (error instanceof z.ZodError) { return NextResponse.json( - { error: 'Invalid request data', details: error.errors }, - { status: 400 } - ) + { error: "Invalid request data", details: error.errors }, + { status: 400 }, + ); } logger.error(`[${tracker.requestId}] Error handling unified chat request`, { - error: error instanceof Error ? error.message : 'Unknown error', + error: error instanceof Error ? error.message : "Unknown error", stack: error instanceof Error ? error.stack : undefined, - }) + }); return NextResponse.json( - { error: error instanceof Error ? error.message : 'Internal server error' }, - { status: 500 } - ) + { + error: error instanceof Error ? 
error.message : "Internal server error", + }, + { status: 500 }, + ); } } diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index f09c1c861bb..f5c121239c6 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -2,6 +2,8 @@ import { db } from '@sim/db' import { copilotChats } from '@sim/db/schema' import { and, eq, sql } from 'drizzle-orm' import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' interface FinalizeAssistantTurnParams { chatId: string @@ -19,39 +21,62 @@ export async function finalizeAssistantTurn({ userMessageId, assistantMessage, }: FinalizeAssistantTurnParams): Promise { - const [row] = await db - .select({ messages: copilotChats.messages }) - .from(copilotChats) - .where(eq(copilotChats.id, chatId)) - .limit(1) + return withCopilotSpan( + TraceSpan.CopilotChatFinalizeAssistantTurn, + { + 'db.system': 'postgresql', + 'db.sql.table': 'copilot_chats', + 'chat.id': chatId, + 'chat.user_message_id': userMessageId, + 'chat.has_assistant_message': !!assistantMessage, + }, + async (span) => { + const [row] = await db + .select({ messages: copilotChats.messages }) + .from(copilotChats) + .where(eq(copilotChats.id, chatId)) + .limit(1) - const messages: Record[] = Array.isArray(row?.messages) ? row.messages : [] - const userIdx = messages.findIndex((message) => message.id === userMessageId) - const alreadyHasResponse = - userIdx >= 0 && - userIdx + 1 < messages.length && - (messages[userIdx + 1] as Record)?.role === 'assistant' - const canAppendAssistant = userIdx >= 0 && userIdx === messages.length - 1 && !alreadyHasResponse - const updateWhere = and( - eq(copilotChats.id, chatId), - eq(copilotChats.conversationId, userMessageId) - ) + const messages: Record[] = Array.isArray(row?.messages) ? 
row.messages : [] + span.setAttribute('chat.existing_message_count', messages.length) + const userIdx = messages.findIndex((message) => message.id === userMessageId) + const alreadyHasResponse = + userIdx >= 0 && + userIdx + 1 < messages.length && + (messages[userIdx + 1] as Record)?.role === 'assistant' + const canAppendAssistant = + userIdx >= 0 && userIdx === messages.length - 1 && !alreadyHasResponse + const updateWhere = and( + eq(copilotChats.id, chatId), + eq(copilotChats.conversationId, userMessageId) + ) - const baseUpdate = { - conversationId: null, - updatedAt: new Date(), - } + const baseUpdate = { + conversationId: null, + updatedAt: new Date(), + } - if (assistantMessage && canAppendAssistant) { - await db - .update(copilotChats) - .set({ - ...baseUpdate, - messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`, - }) - .where(updateWhere) - return - } + if (assistantMessage && canAppendAssistant) { + await db + .update(copilotChats) + .set({ + ...baseUpdate, + messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`, + }) + .where(updateWhere) + span.setAttribute('chat.finalize.outcome', 'appended_assistant') + return + } - await db.update(copilotChats).set(baseUpdate).where(updateWhere) + await db.update(copilotChats).set(baseUpdate).where(updateWhere) + span.setAttribute( + 'chat.finalize.outcome', + assistantMessage + ? alreadyHasResponse + ? 'assistant_already_persisted' + : 'stale_user_message' + : 'cleared_stream_marker_only', + ) + }, + ) } diff --git a/apps/sim/lib/copilot/generated/trace-spans-v1.ts b/apps/sim/lib/copilot/generated/trace-spans-v1.ts new file mode 100644 index 00000000000..c6c383beb9c --- /dev/null +++ b/apps/sim/lib/copilot/generated/trace-spans-v1.ts @@ -0,0 +1,107 @@ +// AUTO-GENERATED FILE. DO NOT EDIT. 
+// +// Source: copilot/copilot/contracts/trace-spans-v1.schema.json +// Regenerate with: bun run trace-spans-contract:generate +// +// Canonical mothership OTel span names. Call sites should reference +// `TraceSpan.` (e.g. `TraceSpan.CopilotVfsReadFile`) +// rather than raw string literals, so the Go-side contract is the +// single source of truth and typos become compile errors. + +export const TraceSpan = { + AnthropicCountTokens: "anthropic.count_tokens", + AsyncToolStoreSet: "async_tool_store.set", + AuthValidateKey: "auth.validate_key", + ChatContinueWithToolResults: "chat.continue_with_tool_results", + ContextReduce: "context.reduce", + ContextSummarizeChunk: "context.summarize_chunk", + CopilotAnalyticsFlush: "copilot.analytics.flush", + CopilotAnalyticsSaveRequest: "copilot.analytics.save_request", + CopilotAnalyticsUpdateBilling: "copilot.analytics.update_billing", + CopilotAsyncRunsClaimCompleted: "copilot.async_runs.claim_completed", + CopilotAsyncRunsCreateRunCheckpoint: "copilot.async_runs.create_run_checkpoint", + CopilotAsyncRunsCreateRunSegment: "copilot.async_runs.create_run_segment", + CopilotAsyncRunsGetAsyncToolCall: "copilot.async_runs.get_async_tool_call", + CopilotAsyncRunsGetLatestForExecution: "copilot.async_runs.get_latest_for_execution", + CopilotAsyncRunsGetLatestForStream: "copilot.async_runs.get_latest_for_stream", + CopilotAsyncRunsGetMany: "copilot.async_runs.get_many", + CopilotAsyncRunsGetRunSegment: "copilot.async_runs.get_run_segment", + CopilotAsyncRunsListForRun: "copilot.async_runs.list_for_run", + CopilotAsyncRunsMarkAsyncToolStatus: "copilot.async_runs.mark_async_tool_status", + CopilotAsyncRunsReleaseClaim: "copilot.async_runs.release_claim", + CopilotAsyncRunsUpdateRunStatus: "copilot.async_runs.update_run_status", + CopilotAsyncRunsUpsertAsyncToolCall: "copilot.async_runs.upsert_async_tool_call", + CopilotAuthValidateApiKey: "copilot.auth.validate_api_key", + CopilotBillingUpdateCost: "copilot.billing.update_cost", + 
CopilotChatFinalizeAssistantTurn: "copilot.chat.finalize_assistant_turn", + CopilotChatPersistUserMessage: "copilot.chat.persist_user_message", + CopilotFinalizeStream: "copilot.finalize_stream", + CopilotRecoveryCheckReplayGap: "copilot.recovery.check_replay_gap", + CopilotResumeRequest: "copilot.resume.request", + CopilotSubagentExecute: "copilot.subagent.execute", + CopilotToolsHandleResourceSideEffects: "copilot.tools.handle_resource_side_effects", + CopilotToolsWriteCsvToTable: "copilot.tools.write_csv_to_table", + CopilotToolsWriteOutputFile: "copilot.tools.write_output_file", + CopilotToolsWriteOutputTable: "copilot.tools.write_output_table", + CopilotVfsPrepareImage: "copilot.vfs.prepare_image", + CopilotVfsReadFile: "copilot.vfs.read_file", + GenAiAgentExecute: "gen_ai.agent.execute", + LlmStream: "llm.stream", + ProviderRouterCountTokens: "provider.router.count_tokens", + ProviderRouterRoute: "provider.router.route", + SimUpdateCost: "sim.update_cost", + SimValidateApiKey: "sim.validate_api_key", + ToolAsyncWaiterWait: "tool.async_waiter.wait", + ToolExecute: "tool.execute", +} as const; + +export type TraceSpanKey = keyof typeof TraceSpan; +export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey]; + +/** Readonly sorted list of every canonical span name. 
*/ +export const TraceSpanValues: readonly TraceSpanValue[] = [ + "anthropic.count_tokens", + "async_tool_store.set", + "auth.validate_key", + "chat.continue_with_tool_results", + "context.reduce", + "context.summarize_chunk", + "copilot.analytics.flush", + "copilot.analytics.save_request", + "copilot.analytics.update_billing", + "copilot.async_runs.claim_completed", + "copilot.async_runs.create_run_checkpoint", + "copilot.async_runs.create_run_segment", + "copilot.async_runs.get_async_tool_call", + "copilot.async_runs.get_latest_for_execution", + "copilot.async_runs.get_latest_for_stream", + "copilot.async_runs.get_many", + "copilot.async_runs.get_run_segment", + "copilot.async_runs.list_for_run", + "copilot.async_runs.mark_async_tool_status", + "copilot.async_runs.release_claim", + "copilot.async_runs.update_run_status", + "copilot.async_runs.upsert_async_tool_call", + "copilot.auth.validate_api_key", + "copilot.billing.update_cost", + "copilot.chat.finalize_assistant_turn", + "copilot.chat.persist_user_message", + "copilot.finalize_stream", + "copilot.recovery.check_replay_gap", + "copilot.resume.request", + "copilot.subagent.execute", + "copilot.tools.handle_resource_side_effects", + "copilot.tools.write_csv_to_table", + "copilot.tools.write_output_file", + "copilot.tools.write_output_table", + "copilot.vfs.prepare_image", + "copilot.vfs.read_file", + "gen_ai.agent.execute", + "llm.stream", + "provider.router.count_tokens", + "provider.router.route", + "sim.update_cost", + "sim.validate_api_key", + "tool.async_waiter.wait", + "tool.execute", +] as const; diff --git a/apps/sim/lib/copilot/request/go/propagation.ts b/apps/sim/lib/copilot/request/go/propagation.ts index 2268049bc6d..0a30011a483 100644 --- a/apps/sim/lib/copilot/request/go/propagation.ts +++ b/apps/sim/lib/copilot/request/go/propagation.ts @@ -8,6 +8,19 @@ const headerSetter = { }, }; +const headerGetter = { + keys(carrier: Headers): string[] { + const out: string[] = []; + carrier.forEach((_, key) 
=> { + out.push(key); + }); + return out; + }, + get(carrier: Headers, key: string): string | undefined { + return carrier.get(key) ?? undefined; + }, +}; + /** * Injects W3C trace context (traceparent, tracestate) into outbound HTTP * headers so Go-side spans join the same OTel trace tree as the calling @@ -24,3 +37,21 @@ export function traceHeaders( propagator.inject(otelContext ?? context.active(), headers, headerSetter); return headers; } + +/** + * Extracts W3C trace context from incoming request headers (traceparent / + * tracestate) and returns an OTel Context seeded with the upstream span. + * + * Use this at the top of inbound Sim route handlers that Go calls into + * (e.g. /api/billing/update-cost, /api/copilot/api-keys/validate) so the + * Sim-side span becomes a proper child of the Go-side client span in the + * same trace — closing the round trip in Jaeger. + * + * When no traceparent is present (e.g. calls from a browser or a client + * that hasn't been instrumented), this returns `context.active()` + * unchanged, and any span started under it becomes a new root — the same + * behavior as before this helper existed. 
+ */ +export function contextFromRequestHeaders(headers: Headers): Context { + return propagator.extract(context.active(), headers, headerGetter); +} diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index b4609d15d19..b76da327f76 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -1,4 +1,4 @@ -import type { Context } from "@opentelemetry/api"; +import { context as otelContextApi, type Context } from "@opentelemetry/api"; import { db } from "@sim/db"; import { copilotChats } from "@sim/db/schema"; import { createLogger } from "@sim/logger"; @@ -26,7 +26,10 @@ import { unregisterActiveStream, } from "@/lib/copilot/request/session"; import { SSE_RESPONSE_HEADERS } from "@/lib/copilot/request/session/sse"; -import { withCopilotOtelContext } from "@/lib/copilot/request/otel"; +import { + type CopilotLifecycleOutcome, + startCopilotOtelRoot, +} from "@/lib/copilot/request/otel"; import { reportTrace, TraceCollector } from "@/lib/copilot/request/trace"; import { taskPubSub } from "@/lib/copilot/tasks"; import { env } from "@/lib/core/config/env"; @@ -54,6 +57,14 @@ export interface StreamingOrchestrationParams { requestId: string; workspaceId?: string; orchestrateOptions: Omit; + /** + * Pre-started gen_ai.agent.execute root returned by + * `startCopilotOtelRoot`. When provided, this stream binds every nested + * span to that root and calls `finish()` on termination. When omitted, + * this function starts its own root internally (kept for back-compat + * with the headless path). + */ + otelRoot?: ReturnType; } export function createSSEStream( @@ -74,8 +85,27 @@ export function createSSEStream( requestId, workspaceId, orchestrateOptions, + otelRoot, } = params; + // If the caller (POST handler) already started the gen_ai.agent.execute + // root so that pre-stream setup work (persistUserMessage, resource + // loads, etc.) 
could nest under it, reuse that root and finish it from + // our terminal code path via the idempotent `finish`. Otherwise start + // our own so the stream still gets a proper OTel trace. + const activeOtelRoot = + otelRoot ?? + startCopilotOtelRoot({ + requestId, + route: orchestrateOptions.goRoute, + chatId, + workflowId: orchestrateOptions.workflowId, + executionId, + runId, + streamId, + transport: "stream", + }); + const abortController = new AbortController(); registerActiveStream(streamId, abortController); @@ -87,18 +117,16 @@ export function createSSEStream( async start(controller) { publisher.attach(controller); - await withCopilotOtelContext( - { - requestId, - route: orchestrateOptions.goRoute, - chatId, - workflowId: orchestrateOptions.workflowId, - executionId, - runId, - streamId, - transport: "stream", - }, - async (otelContext) => { + // Re-enter the root OTel context. Node's AsyncLocalStorage does + // not survive the Next.js handler -> ReadableStream.start boundary, + // so nested `withCopilotSpan` / `withDbSpan` calls would otherwise + // orphan into new traces. 
+ await otelContextApi.with(activeOtelRoot.context, async () => { + const otelContext = activeOtelRoot.context; + let rootOutcome: CopilotLifecycleOutcome = + RequestTraceV1Outcome.error; + let rootError: unknown = undefined; + try { const requestSpan = collector.startSpan( "Mothership Request", "request", @@ -108,7 +136,7 @@ export function createSSEStream( runId, }, ); - let outcome: "success" | "error" | "cancelled" = "error"; + let outcome: CopilotLifecycleOutcome = RequestTraceV1Outcome.error; let lifecycleResult: | { usage?: { prompt: number; completion: number }; @@ -262,7 +290,7 @@ export function createSSEStream( await cleanupAbortMarker(streamId); const trace = collector.build({ - outcome: outcome as "success" | "error" | "cancelled", + outcome, simRequestId: requestId, streamId, chatId, @@ -272,9 +300,33 @@ export function createSSEStream( cost: lifecycleResult?.cost, }); reportTrace(trace, otelContext).catch(() => {}); + rootOutcome = outcome; + if (lifecycleResult?.usage) { + activeOtelRoot.span.setAttributes({ + "gen_ai.usage.input_tokens": lifecycleResult.usage.prompt ?? 0, + "gen_ai.usage.output_tokens": + lifecycleResult.usage.completion ?? 0, + }); + } + if (lifecycleResult?.cost) { + activeOtelRoot.span.setAttributes({ + "billing.cost.input_usd": lifecycleResult.cost.input ?? 0, + "billing.cost.output_usd": lifecycleResult.cost.output ?? 0, + "billing.cost.total_usd": lifecycleResult.cost.total ?? 0, + }); + } } - }, - ); + } catch (error) { + rootOutcome = RequestTraceV1Outcome.error; + rootError = error; + throw error; + } finally { + // `finish` is idempotent, so it's safe whether the POST + // handler started the root (and may also call finish on an + // error path before the stream ran) or we did. 
+ activeOtelRoot.finish(rootOutcome, rootError); + } + }); }, cancel() { publisher.markDisconnected(); diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index ec04c6630df..15e2425c001 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -1,6 +1,8 @@ import { randomBytes } from "crypto"; import { context, + ROOT_CONTEXT, + SpanKind, SpanStatusCode, TraceFlags, trace, @@ -8,6 +10,19 @@ import { type Span, type SpanContext, } from "@opentelemetry/api"; +import type { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; +import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; +import { contextFromRequestHeaders } from "@/lib/copilot/request/go/propagation"; + +/** + * Reuse the generated RequestTraceV1Outcome string values for every + * lifecycle outcome field. This keeps our OTel attributes, internal + * TraceCollector outcomes, and the trace-ingestion wire contract all + * using the same three strings ("success" | "error" | "cancelled") + * without scattering the literals through the codebase. + */ +export type CopilotLifecycleOutcome = + (typeof RequestTraceV1Outcome)[keyof typeof RequestTraceV1Outcome]; /** * Resolve the tracer lazily on every call. With Next.js 16 + Turbopack dev @@ -26,6 +41,111 @@ function getTracer() { return getCopilotTracer(); } +/** + * Wrap an inbound Next.js route handler that Go calls into (e.g. billing + * update-cost, api-key validate) so the Sim-side work shows up as a + * child of the originating Go span in the same trace. + * + * Reads `traceparent` / `tracestate` from the request headers, installs + * that remote span as the active parent, and starts a server-kind OTel + * span around `fn`. Any `withCopilotSpan`/`withDbSpan`/etc. call below + * nests automatically via AsyncLocalStorage. + * + * If the request has no trace context (e.g. 
hand-rolled curl, browser + * test), this still produces a valid root span for the handler — you + * just won't see the Go-side parent. + */ +export async function withIncomingGoSpan( + headers: Headers, + spanName: string, + attributes: Record | undefined, + fn: (span: Span) => Promise, +): Promise { + const parentContext = contextFromRequestHeaders(headers); + const tracer = getTracer(); + return tracer.startActiveSpan( + spanName, + { kind: SpanKind.SERVER, attributes }, + parentContext, + async (span) => { + try { + const result = await fn(span); + span.setStatus({ code: SpanStatusCode.OK }); + return result; + } catch (error) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? error : new Error(String(error)), + ); + throw error; + } finally { + span.end(); + } + }, + ); +} + +/** + * Generic helper for wrapping a copilot-lifecycle operation in an OTel + * span. Use this for post-tool processing, session recovery, subagent + * orchestration, async-runs DB calls, etc. — anywhere the work is part + * of a mothership request and we want it reflected in the external OTLP + * trace. + * + * The returned span honors the currently-active OTel context, so it + * threads under `gen_ai.agent.execute` (or a `tool.execute` parent) if + * one is live. If there's no active span, it becomes a root — which is + * almost never what you want; call this from inside a mothership request + * handler, not from arbitrary background code. + */ +export async function withCopilotSpan( + spanName: string, + attributes: Record | undefined, + fn: (span: Span) => Promise, + /** + * Optional explicit parent context. 
Useful when the caller is in a + * code path where Next.js / Turbopack / multiple awaits can drop the + * AsyncLocalStorage-tracked context we installed at the top of the + * request — passing the captured root context explicitly guarantees + * the new span parents correctly instead of falling back to whatever + * framework span is currently active (which then gets dropped by our + * sampler, stranding this span in the trace). + */ + parentContext?: Context, +): Promise { + const tracer = getTracer(); + const runBody = async (span: Span) => { + try { + const result = await fn(span); + span.setStatus({ code: SpanStatusCode.OK }); + return result; + } catch (error) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? error : new Error(String(error)), + ); + throw error; + } finally { + span.end(); + } + }; + if (parentContext) { + return tracer.startActiveSpan( + spanName, + { attributes }, + parentContext, + runBody, + ); + } + return tracer.startActiveSpan(spanName, { attributes }, runBody); +} + /** * Run `fn` inside an OTel `tool.execute` span. This mirrors the internal * TraceCollector span that already wraps Sim-side tool work, so the @@ -108,35 +228,106 @@ export interface CopilotOtelScope { transport: "headless" | "stream"; } +/** + * Build the canonical `gen_ai.agent.execute` attribute set from a scope. + * Shared between `withCopilotOtelContext` (fully-managed lifetime) and + * `startCopilotOtelRoot` (manually-managed, for handlers that need the + * span to outlive the synchronous handler body — e.g. SSE routes). + */ +function buildAgentSpanAttributes( + scope: CopilotOtelScope, +): Record { + return { + "gen_ai.agent.name": "mothership", + "gen_ai.agent.id": + scope.transport === "stream" ? "mothership-stream" : "mothership-headless", + "gen_ai.operation.name": + scope.transport === "stream" ? 
"chat" : "invoke_agent", + "request.id": scope.requestId, + "sim.request_id": scope.requestId, + "copilot.route": scope.route ?? "", + "copilot.transport": scope.transport, + ...(scope.chatId ? { "chat.id": scope.chatId } : {}), + ...(scope.workflowId ? { "workflow.id": scope.workflowId } : {}), + ...(scope.executionId ? { "workflow.execution_id": scope.executionId } : {}), + ...(scope.runId ? { "run.id": scope.runId } : {}), + ...(scope.streamId ? { "stream.id": scope.streamId } : {}), + }; +} + +/** + * Start a `gen_ai.agent.execute` root span with manually-managed + * lifetime. Returns the span, its context, and a `finish` callback the + * caller MUST invoke when the whole request lifecycle is over (including + * any SSE streaming that outlives the Next.js handler return). + * + * Use this for the chat POST handler path: + * 1. Start the root at the top so `persistUserMessage` and every other + * setup span is a child instead of orphaning into a new trace. + * 2. Pass the context into `createSSEStream` so the stream callback + * re-enters it (AsyncLocalStorage does not survive the Next.js + * handler return into the ReadableStream runtime). + * 3. Call `finish()` from the stream's terminal code path. + * + * Prefer `withCopilotOtelContext` when the work is fully inside one + * async function (e.g. headless invoke) — it handles the lifecycle for + * you. + */ +export interface CopilotOtelRoot { + span: Span; + context: Context; + finish: (outcome?: CopilotLifecycleOutcome, error?: unknown) => void; +} + +export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { + // Create gen_ai.agent.execute as a TRUE root span — do not inherit + // from Next.js's HTTP handler span. 
The framework span is dropped by + // our sampler (it has `next.span_type`), so if we parented under it, + // this span would appear orphaned in Jaeger ("span has missing parent" + // warning) and any descendant whose AsyncLocalStorage propagation was + // disrupted would inherit the same dropped parent. Starting from + // ROOT_CONTEXT gives the mothership lifecycle its own clean trace tree. + const parentContext = ROOT_CONTEXT; + const span = getTracer().startSpan( + TraceSpan.GenAiAgentExecute, + { attributes: buildAgentSpanAttributes(scope) }, + parentContext, + ); + const carrierSpan = isValidSpanContext(span.spanContext()) + ? span + : trace.wrapSpanContext(createFallbackSpanContext()); + const rootContext = trace.setSpan(parentContext, carrierSpan); + + let finished = false; + const finish: CopilotOtelRoot["finish"] = (outcome = "success", error) => { + if (finished) return; + finished = true; + span.setAttribute("copilot.request.outcome", outcome); + if (error) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + span.recordException( + error instanceof Error ? error : new Error(String(error)), + ); + } else if (outcome === "success") { + span.setStatus({ code: SpanStatusCode.OK }); + } + span.end(); + }; + + return { span, context: rootContext, finish }; +} + export async function withCopilotOtelContext( scope: CopilotOtelScope, fn: (otelContext: Context) => Promise, ): Promise { const parentContext = context.active(); const span = getTracer().startSpan( - "gen_ai.agent.execute", - { - attributes: { - "gen_ai.agent.name": "mothership", - "gen_ai.agent.id": - scope.transport === "stream" - ? "mothership-stream" - : "mothership-headless", - "gen_ai.operation.name": - scope.transport === "stream" ? "chat" : "invoke_agent", - "request.id": scope.requestId, - "sim.request_id": scope.requestId, - "copilot.route": scope.route ?? "", - "copilot.transport": scope.transport, - ...(scope.chatId ? 
{ "chat.id": scope.chatId } : {}), - ...(scope.workflowId ? { "workflow.id": scope.workflowId } : {}), - ...(scope.executionId - ? { "workflow.execution_id": scope.executionId } - : {}), - ...(scope.runId ? { "run.id": scope.runId } : {}), - ...(scope.streamId ? { "stream.id": scope.streamId } : {}), - }, - }, + TraceSpan.GenAiAgentExecute, + { attributes: buildAgentSpanAttributes(scope) }, parentContext, ); const carrierSpan = isValidSpanContext(span.spanContext()) diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 0ae7ed563b6..7271ff27258 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -3,6 +3,8 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from "@/lib/copilot/generated/mothership-stream-v1"; +import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; +import { withCopilotSpan } from "@/lib/copilot/request/otel"; import { getLatestSeq, getOldestSeq, readEvents } from "./buffer"; import { createEvent } from "./event"; @@ -20,69 +22,87 @@ export async function checkForReplayGap( ): Promise { const requestedAfterSeq = Number(afterCursor || "0"); if (requestedAfterSeq <= 0) { + // Fast path: no cursor → nothing to check. Skip the span to avoid + // emitting zero-work spans on every stream connect. return null; } - const oldestSeq = await getOldestSeq(streamId); - const latestSeq = await getLatestSeq(streamId); + return withCopilotSpan( + TraceSpan.CopilotRecoveryCheckReplayGap, + { + "stream.id": streamId, + "copilot.recovery.requested_after_seq": requestedAfterSeq, + ...(requestId ? { "request.id": requestId } : {}), + }, + async (span) => { + const oldestSeq = await getOldestSeq(streamId); + const latestSeq = await getLatestSeq(streamId); + span.setAttributes({ + "copilot.recovery.oldest_seq": oldestSeq ?? -1, + "copilot.recovery.latest_seq": latestSeq ?? 
-1, + }); - if ( - latestSeq !== null && - latestSeq > 0 && - oldestSeq !== null && - requestedAfterSeq < oldestSeq - 1 - ) { - const resolvedRequestId = await resolveReplayGapRequestId( - streamId, - latestSeq, - requestId, - ); - logger.warn( - "Replay gap detected: requested cursor is below oldest available event", - { - streamId, - requestedAfterSeq, - oldestAvailableSeq: oldestSeq, - latestSeq, - }, - ); + if ( + latestSeq !== null && + latestSeq > 0 && + oldestSeq !== null && + requestedAfterSeq < oldestSeq - 1 + ) { + const resolvedRequestId = await resolveReplayGapRequestId( + streamId, + latestSeq, + requestId, + ); + logger.warn( + "Replay gap detected: requested cursor is below oldest available event", + { + streamId, + requestedAfterSeq, + oldestAvailableSeq: oldestSeq, + latestSeq, + }, + ); + span.setAttribute("copilot.recovery.outcome", "gap_detected"); - const gapEnvelope = createEvent({ - streamId, - cursor: String(latestSeq + 1), - seq: latestSeq + 1, - requestId: resolvedRequestId, - type: MothershipStreamV1EventType.error, - payload: { - message: - "Replay history is no longer available. Some events may have been lost.", - code: "replay_gap", - data: { - oldestAvailableSeq: oldestSeq, - requestedAfterSeq, - }, - }, - }); + const gapEnvelope = createEvent({ + streamId, + cursor: String(latestSeq + 1), + seq: latestSeq + 1, + requestId: resolvedRequestId, + type: MothershipStreamV1EventType.error, + payload: { + message: + "Replay history is no longer available. 
Some events may have been lost.", + code: "replay_gap", + data: { + oldestAvailableSeq: oldestSeq, + requestedAfterSeq, + }, + }, + }); - const terminalEnvelope = createEvent({ - streamId, - cursor: String(latestSeq + 2), - seq: latestSeq + 2, - requestId: resolvedRequestId, - type: MothershipStreamV1EventType.complete, - payload: { - status: MothershipStreamV1CompletionStatus.error, - reason: "replay_gap", - }, - }); + const terminalEnvelope = createEvent({ + streamId, + cursor: String(latestSeq + 2), + seq: latestSeq + 2, + requestId: resolvedRequestId, + type: MothershipStreamV1EventType.complete, + payload: { + status: MothershipStreamV1CompletionStatus.error, + reason: "replay_gap", + }, + }); - return { - gapDetected: true, - envelopes: [gapEnvelope, terminalEnvelope], - }; - } + return { + gapDetected: true, + envelopes: [gapEnvelope, terminalEnvelope], + }; + } - return null; + span.setAttribute("copilot.recovery.outcome", "in_range"); + return null; + }, + ); } async function resolveReplayGapRequestId( diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index acf31d685f7..6b569790f93 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -8,6 +8,8 @@ import { import { createStreamingContext } from "@/lib/copilot/request/context/request-context"; import { buildToolCallSummaries } from "@/lib/copilot/request/context/result"; import { runStreamLoop } from "@/lib/copilot/request/go/stream"; +import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; +import { withCopilotSpan } from "@/lib/copilot/request/otel"; import type { ExecutionContext, OrchestratorOptions, @@ -54,6 +56,42 @@ export async function orchestrateSubagentStream( agentId: string, requestPayload: Record, options: SubagentOrchestratorOptions, +): Promise { + return withCopilotSpan( + TraceSpan.CopilotSubagentExecute, + { + "subagent.id": agentId, + "user.id": options.userId, + 
...(options.simRequestId ? { "sim.request_id": options.simRequestId } : {}), + ...(options.workflowId ? { "workflow.id": options.workflowId } : {}), + ...(options.workspaceId ? { "workspace.id": options.workspaceId } : {}), + }, + async (otelSpan) => { + const result = await orchestrateSubagentStreamInner( + agentId, + requestPayload, + options, + ); + otelSpan.setAttributes({ + "subagent.outcome.success": result.success, + "subagent.outcome.tool_call_count": result.toolCalls.length, + "subagent.outcome.content_bytes": result.content?.length ?? 0, + ...(result.structuredResult?.type + ? { "subagent.outcome.structured_type": result.structuredResult.type } + : {}), + ...(result.error + ? { "subagent.outcome.error": String(result.error).slice(0, 500) } + : {}), + }); + return result; + }, + ); +} + +async function orchestrateSubagentStreamInner( + agentId: string, + requestPayload: Record, + options: SubagentOrchestratorOptions, ): Promise { const { userId, workflowId, workspaceId, userPermission } = options; const chatId = diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 64bb4bf6a1d..2c94cebde94 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -1,5 +1,7 @@ import { createLogger } from '@sim/logger' import { FunctionExecute, UserTable } from '@/lib/copilot/generated/tool-catalog-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' @@ -146,55 +148,80 @@ export async function maybeWriteOutputToFile( const explicitFormat = (params?.outputFormat as string | undefined) ?? 
(args?.outputFormat as string | undefined) - try { - const fileName = normalizeOutputWorkspaceFileName(outputPath) - const format = resolveOutputFormat(fileName, explicitFormat) - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - const content = serializeOutputForFile(result.output, format) - const contentType = FORMAT_TO_CONTENT_TYPE[format] - - const buffer = Buffer.from(content, 'utf-8') - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - const uploaded = await uploadWorkspaceFile( - context.workspaceId, - context.userId, - buffer, - fileName, - contentType - ) - - logger.info('Tool output written to file', { - toolName, - fileName, - size: buffer.length, - fileId: uploaded.id, - }) - - return { - success: true, - output: { - message: `Output written to files/${fileName} (${buffer.length} bytes)`, - fileId: uploaded.id, - fileName, - size: buffer.length, - downloadUrl: uploaded.url, - }, - resources: [{ type: 'file', id: uploaded.id, title: fileName }], - } - } catch (err) { - const message = err instanceof Error ? err.message : String(err) - logger.warn('Failed to write tool output to file', { - toolName, - outputPath, - error: message, - }) - return { - success: false, - error: `Failed to write output file: ${message}`, - } - } + // Only span the actual write path (where we upload to storage). Fast + // no-op returns above don't need a span — they'd just pad the trace + // with empty work. 
+ return withCopilotSpan( + TraceSpan.CopilotToolsWriteOutputFile, + { + 'tool.name': toolName, + 'workspace.id': context.workspaceId, + }, + async (span) => { + try { + const fileName = normalizeOutputWorkspaceFileName(outputPath) + const format = resolveOutputFormat(fileName, explicitFormat) + span.setAttributes({ + 'copilot.output_file.name': fileName, + 'copilot.output_file.format': format, + }) + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + const content = serializeOutputForFile(result.output, format) + const contentType = FORMAT_TO_CONTENT_TYPE[format] + + const buffer = Buffer.from(content, 'utf-8') + span.setAttribute('copilot.output_file.bytes', buffer.length) + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + const uploaded = await uploadWorkspaceFile( + context.workspaceId!, + context.userId!, + buffer, + fileName, + contentType + ) + span.setAttributes({ + 'copilot.output_file.id': uploaded.id, + 'copilot.output_file.outcome': 'uploaded', + }) + + logger.info('Tool output written to file', { + toolName, + fileName, + size: buffer.length, + fileId: uploaded.id, + }) + + return { + success: true, + output: { + message: `Output written to files/${fileName} (${buffer.length} bytes)`, + fileId: uploaded.id, + fileName, + size: buffer.length, + downloadUrl: uploaded.url, + }, + resources: [{ type: 'file', id: uploaded.id, title: fileName }], + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err) + logger.warn('Failed to write tool output to file', { + toolName, + outputPath, + error: message, + }) + span.setAttribute('copilot.output_file.outcome', 'failed') + span.addEvent('copilot.output_file.error', { + 'error.message': message.slice(0, 500), + }) + return { + success: false, + error: `Failed to write output file: ${message}`, + } + } + }, + ) } diff --git a/apps/sim/lib/copilot/request/tools/resources.ts b/apps/sim/lib/copilot/request/tools/resources.ts index b14f0caf79e..22f02ba959e 100644 --- a/apps/sim/lib/copilot/request/tools/resources.ts +++ b/apps/sim/lib/copilot/request/tools/resources.ts @@ -3,6 +3,8 @@ import { MothershipStreamV1EventType, MothershipStreamV1ResourceOp, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { StreamEvent, ToolCallResult } from '@/lib/copilot/request/types' import { extractDeletedResourcesFromToolResult, @@ -29,63 +31,94 @@ export async function handleResourceSideEffects( onEvent: ((event: StreamEvent) => void | Promise) | undefined, isAborted: () => boolean ): Promise { - let isDeleteOp = false + // Cheap early exit so we don't emit a span for tools that can never + // produce resources (most of them). The span only shows up for tools + // that might actually do resource work. + if (!hasDeleteCapability(toolName) && !isResourceToolName(toolName) && !(result.resources && result.resources.length > 0)) { + return + } - if (hasDeleteCapability(toolName)) { - const deleted = extractDeletedResourcesFromToolResult(toolName, params, result.output) - if (deleted.length > 0) { - isDeleteOp = true - removeChatResources(chatId, deleted).catch((err) => { - logger.warn('Failed to remove chat resources after deletion', { - chatId, - error: err instanceof Error ? 
err.message : String(err), - }) - }) + return withCopilotSpan( + TraceSpan.CopilotToolsHandleResourceSideEffects, + { + 'tool.name': toolName, + 'chat.id': chatId, + }, + async (span) => { + let isDeleteOp = false + let removedCount = 0 + let upsertedCount = 0 + + if (hasDeleteCapability(toolName)) { + const deleted = extractDeletedResourcesFromToolResult(toolName, params, result.output) + if (deleted.length > 0) { + isDeleteOp = true + removedCount = deleted.length + // Detached from the span lifecycle — the span ends before the + // DB call completes. That is intentional; we want the span to + // reflect the synchronous decision + event emission, not the + // best-effort persistence. + removeChatResources(chatId, deleted).catch((err) => { + logger.warn('Failed to remove chat resources after deletion', { + chatId, + error: err instanceof Error ? err.message : String(err), + }) + }) - for (const resource of deleted) { - if (isAborted()) break - await onEvent?.({ - type: MothershipStreamV1EventType.resource, - payload: { - op: MothershipStreamV1ResourceOp.remove, - resource: { type: resource.type, id: resource.id, title: resource.title }, - }, - }) + for (const resource of deleted) { + if (isAborted()) break + await onEvent?.({ + type: MothershipStreamV1EventType.resource, + payload: { + op: MothershipStreamV1ResourceOp.remove, + resource: { type: resource.type, id: resource.id, title: resource.title }, + }, + }) + } + } } - } - } - if (!isDeleteOp && !isAborted()) { - const resources = - result.resources && result.resources.length > 0 - ? result.resources - : isResourceToolName(toolName) - ? extractResourcesFromToolResult(toolName, params, result.output) - : [] + if (!isDeleteOp && !isAborted()) { + const resources = + result.resources && result.resources.length > 0 + ? result.resources + : isResourceToolName(toolName) + ? 
extractResourcesFromToolResult(toolName, params, result.output) + : [] - if (resources.length > 0) { - logger.info('[file-stream-server] Emitting resource upsert events', { - toolName, - chatId, - resources: resources.map((r) => ({ type: r.type, id: r.id, title: r.title })), - }) - persistChatResources(chatId, resources).catch((err) => { - logger.warn('Failed to persist chat resources', { - chatId, - error: err instanceof Error ? err.message : String(err), - }) - }) + if (resources.length > 0) { + upsertedCount = resources.length + logger.info('[file-stream-server] Emitting resource upsert events', { + toolName, + chatId, + resources: resources.map((r) => ({ type: r.type, id: r.id, title: r.title })), + }) + persistChatResources(chatId, resources).catch((err) => { + logger.warn('Failed to persist chat resources', { + chatId, + error: err instanceof Error ? err.message : String(err), + }) + }) - for (const resource of resources) { - if (isAborted()) break - await onEvent?.({ - type: MothershipStreamV1EventType.resource, - payload: { - op: MothershipStreamV1ResourceOp.upsert, - resource: { type: resource.type, id: resource.id, title: resource.title }, - }, - }) + for (const resource of resources) { + if (isAborted()) break + await onEvent?.({ + type: MothershipStreamV1EventType.resource, + payload: { + op: MothershipStreamV1ResourceOp.upsert, + resource: { type: resource.type, id: resource.id, title: resource.title }, + }, + }) + } + } } - } - } + + span.setAttributes({ + 'copilot.resources.op': isDeleteOp ? 'delete' : upsertedCount > 0 ? 
'upsert' : 'none', + 'copilot.resources.removed_count': removedCount, + 'copilot.resources.upserted_count': upsertedCount, + 'copilot.resources.aborted': isAborted(), + }) + }, + ) } diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index 89e0a5c19f0..7489f07934c 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -4,6 +4,8 @@ import { createLogger } from '@sim/logger' import { parse as csvParse } from 'csv-parse/sync' import { eq } from 'drizzle-orm' import { FunctionExecute, Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { getTableById } from '@/lib/table/service' @@ -25,105 +27,127 @@ export async function maybeWriteOutputToTable( const outputTable = params?.outputTable as string | undefined if (!outputTable) return result - try { - const table = await getTableById(outputTable) - if (!table) { - return { - success: false, - error: `Table "${outputTable}" not found`, - } - } + return withCopilotSpan( + TraceSpan.CopilotToolsWriteOutputTable, + { + 'tool.name': toolName, + 'copilot.table.id': outputTable, + 'workspace.id': context.workspaceId, + }, + async (span) => { + try { + const table = await getTableById(outputTable) + if (!table) { + span.setAttribute('copilot.table.outcome', 'table_not_found') + return { + success: false, + error: `Table "${outputTable}" not found`, + } + } - const rawOutput = result.output - let rows: Array> + const rawOutput = result.output + let rows: Array> - if (rawOutput && typeof rawOutput === 'object' && 'result' in rawOutput) { - const inner = (rawOutput as Record).result - if (Array.isArray(inner)) { - rows = inner - } else { - return { - success: false, - error: 'outputTable 
requires the code to return an array of objects', + if (rawOutput && typeof rawOutput === 'object' && 'result' in rawOutput) { + const inner = (rawOutput as Record).result + if (Array.isArray(inner)) { + rows = inner + } else { + span.setAttribute('copilot.table.outcome', 'invalid_shape') + return { + success: false, + error: 'outputTable requires the code to return an array of objects', + } + } + } else if (Array.isArray(rawOutput)) { + rows = rawOutput + } else { + span.setAttribute('copilot.table.outcome', 'invalid_shape') + return { + success: false, + error: 'outputTable requires the code to return an array of objects', + } } - } - } else if (Array.isArray(rawOutput)) { - rows = rawOutput - } else { - return { - success: false, - error: 'outputTable requires the code to return an array of objects', - } - } - if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - return { - success: false, - error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, - } - } + span.setAttribute('copilot.table.row_count', rows.length) - if (rows.length === 0) { - return { - success: false, - error: 'outputTable requires at least one row — code returned an empty array', - } - } - - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - await db.transaction(async (tx) => { - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable)) + if (rows.length > MAX_OUTPUT_TABLE_ROWS) { + span.setAttribute('copilot.table.outcome', 'row_limit_exceeded') + return { + success: false, + error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, + } + } + + if (rows.length === 0) { + span.setAttribute('copilot.table.outcome', 'empty_rows') + return { + success: false, + error: 'outputTable requires at least one row — code returned an empty 
array', + } + } - const now = new Date() - for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) { if (context.abortSignal?.aborted) { throw new Error('Request aborted before tool mutation could be applied') } - const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE) - const values = chunk.map((rowData, j) => ({ - id: `row_${crypto.randomUUID().replace(/-/g, '')}`, + await db.transaction(async (tx) => { + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable)) + + const now = new Date() + for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) { + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE) + const values = chunk.map((rowData, j) => ({ + id: `row_${crypto.randomUUID().replace(/-/g, '')}`, + tableId: outputTable, + workspaceId: context.workspaceId!, + data: rowData, + position: i + j, + createdAt: now, + updatedAt: now, + createdBy: context.userId, + })) + await tx.insert(userTableRows).values(values) + } + }) + + logger.info('Tool output written to table', { + toolName, tableId: outputTable, - workspaceId: context.workspaceId!, - data: rowData, - position: i + j, - createdAt: now, - updatedAt: now, - createdBy: context.userId, - })) - await tx.insert(userTableRows).values(values) + rowCount: rows.length, + }) + span.setAttribute('copilot.table.outcome', 'wrote') + return { + success: true, + output: { + message: `Wrote ${rows.length} rows to table ${outputTable}`, + tableId: outputTable, + rowCount: rows.length, + }, + } + } catch (err) { + logger.warn('Failed to write tool output to table', { + toolName, + outputTable, + error: err instanceof Error ? 
err.message : String(err), + }) + span.setAttribute('copilot.table.outcome', 'failed') + span.addEvent('copilot.table.error', { + 'error.message': + (err instanceof Error ? err.message : String(err)).slice(0, 500), + }) + return { + success: false, + error: `Failed to write to table: ${err instanceof Error ? err.message : String(err)}`, + } } - }) - - logger.info('Tool output written to table', { - toolName, - tableId: outputTable, - rowCount: rows.length, - }) - - return { - success: true, - output: { - message: `Wrote ${rows.length} rows to table ${outputTable}`, - tableId: outputTable, - rowCount: rows.length, - }, - } - } catch (err) { - logger.warn('Failed to write tool output to table', { - toolName, - outputTable, - error: err instanceof Error ? err.message : String(err), - }) - return { - success: false, - error: `Failed to write to table: ${err instanceof Error ? err.message : String(err)}`, - } - } + }, + ) } export async function maybeWriteReadCsvToTable( @@ -139,110 +163,137 @@ export async function maybeWriteReadCsvToTable( const outputTable = params?.outputTable as string | undefined if (!outputTable) return result - try { - const table = await getTableById(outputTable) - if (!table) { - return { success: false, error: `Table "${outputTable}" not found` } - } + return withCopilotSpan( + TraceSpan.CopilotToolsWriteCsvToTable, + { + 'tool.name': toolName, + 'copilot.table.id': outputTable, + 'workspace.id': context.workspaceId, + }, + async (span) => { + try { + const table = await getTableById(outputTable) + if (!table) { + span.setAttribute('copilot.table.outcome', 'table_not_found') + return { success: false, error: `Table "${outputTable}" not found` } + } - const output = result.output as Record - const content = (output.content as string) || '' - if (!content.trim()) { - return { success: false, error: 'File has no content to import into table' } - } + const output = result.output as Record + const content = (output.content as string) || '' + if 
(!content.trim()) { + span.setAttribute('copilot.table.outcome', 'empty_content') + return { success: false, error: 'File has no content to import into table' } + } - const filePath = (params?.path as string) || '' - const ext = filePath.split('.').pop()?.toLowerCase() + const filePath = (params?.path as string) || '' + const ext = filePath.split('.').pop()?.toLowerCase() + span.setAttributes({ + 'copilot.table.source.path': filePath, + 'copilot.table.source.format': ext === 'json' ? 'json' : 'csv', + 'copilot.table.source.content_bytes': content.length, + }) - let rows: Record[] + let rows: Record[] - if (ext === 'json') { - const parsed = JSON.parse(content) - if (!Array.isArray(parsed)) { - return { - success: false, - error: 'JSON file must contain an array of objects for table import', + if (ext === 'json') { + const parsed = JSON.parse(content) + if (!Array.isArray(parsed)) { + span.setAttribute('copilot.table.outcome', 'invalid_json_shape') + return { + success: false, + error: 'JSON file must contain an array of objects for table import', + } + } + rows = parsed + } else { + rows = csvParse(content, { + columns: true, + skip_empty_lines: true, + trim: true, + relax_column_count: true, + relax_quotes: true, + skip_records_with_error: true, + cast: false, + }) as Record[] + } + + span.setAttribute('copilot.table.row_count', rows.length) + + if (rows.length === 0) { + span.setAttribute('copilot.table.outcome', 'empty_rows') + return { success: false, error: 'File has no data rows to import' } + } + + if (rows.length > MAX_OUTPUT_TABLE_ROWS) { + span.setAttribute('copilot.table.outcome', 'row_limit_exceeded') + return { + success: false, + error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, + } } - } - rows = parsed - } else { - rows = csvParse(content, { - columns: true, - skip_empty_lines: true, - trim: true, - relax_column_count: true, - relax_quotes: true, - skip_records_with_error: true, - cast: false, - }) as Record[] - } - - 
if (rows.length === 0) { - return { success: false, error: 'File has no data rows to import' } - } - - if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - return { - success: false, - error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, - } - } - - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - await db.transaction(async (tx) => { - if (context.abortSignal?.aborted) { - throw new Error('Request aborted before tool mutation could be applied') - } - await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable)) - const now = new Date() - for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) { if (context.abortSignal?.aborted) { throw new Error('Request aborted before tool mutation could be applied') } - const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE) - const values = chunk.map((rowData, j) => ({ - id: `row_${crypto.randomUUID().replace(/-/g, '')}`, + await db.transaction(async (tx) => { + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + await tx.delete(userTableRows).where(eq(userTableRows.tableId, outputTable)) + + const now = new Date() + for (let i = 0; i < rows.length; i += BATCH_CHUNK_SIZE) { + if (context.abortSignal?.aborted) { + throw new Error('Request aborted before tool mutation could be applied') + } + const chunk = rows.slice(i, i + BATCH_CHUNK_SIZE) + const values = chunk.map((rowData, j) => ({ + id: `row_${crypto.randomUUID().replace(/-/g, '')}`, + tableId: outputTable, + workspaceId: context.workspaceId!, + data: rowData, + position: i + j, + createdAt: now, + updatedAt: now, + createdBy: context.userId, + })) + await tx.insert(userTableRows).values(values) + } + }) + + logger.info('Read output written to table', { + toolName, tableId: outputTable, - workspaceId: context.workspaceId!, - data: rowData, - position: i + j, - createdAt: now, - updatedAt: now, - createdBy: 
context.userId, - })) - await tx.insert(userTableRows).values(values) + tableName: table.name, + rowCount: rows.length, + filePath, + }) + span.setAttribute('copilot.table.outcome', 'imported') + return { + success: true, + output: { + message: `Imported ${rows.length} rows from "${filePath}" into table "${table.name}"`, + tableId: outputTable, + tableName: table.name, + rowCount: rows.length, + }, + } + } catch (err) { + logger.warn('Failed to write read output to table', { + toolName, + outputTable, + error: err instanceof Error ? err.message : String(err), + }) + span.setAttribute('copilot.table.outcome', 'failed') + span.addEvent('copilot.table.error', { + 'error.message': + (err instanceof Error ? err.message : String(err)).slice(0, 500), + }) + return { + success: false, + error: `Failed to import into table: ${err instanceof Error ? err.message : String(err)}`, + } } - }) - - logger.info('Read output written to table', { - toolName, - tableId: outputTable, - tableName: table.name, - rowCount: rows.length, - filePath, - }) - - return { - success: true, - output: { - message: `Imported ${rows.length} rows from "${filePath}" into table "${table.name}"`, - tableId: outputTable, - tableName: table.name, - rowCount: rows.length, - }, - } - } catch (err) { - logger.warn('Failed to write read output to table', { - toolName, - outputTable, - error: err instanceof Error ? err.message : String(err), - }) - return { - success: false, - error: `Failed to import into table: ${err instanceof Error ? 
err.message : String(err)}`, - } - } + }, + ) } diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index faf08c3608d..7a4deee6cf6 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -1,8 +1,26 @@ +import { SpanStatusCode, trace, type Span } from '@opentelemetry/api' import { createLogger } from '@sim/logger' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { downloadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { isImageFileType } from '@/lib/uploads/utils/file-utils' +/** + * Lazy tracer (see lib/copilot/request/otel.ts for the same pattern and + * why we resolve on every call). + */ +function getVfsTracer() { + return trace.getTracer('sim-copilot-vfs', '1.0.0') +} + +function recordSpanError(span: Span, err: unknown) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: err instanceof Error ? err.message : String(err), + }) + span.recordException(err instanceof Error ? err : new Error(String(err))) +} + const logger = createLogger('FileReader') const MAX_TEXT_READ_BYTES = 5 * 1024 * 1024 // 5 MB @@ -50,106 +68,189 @@ interface PreparedVisionImage { resized: boolean } +/** + * Prepare an image for vision models: detect media type, optionally + * resize/compress with sharp, and return the prepared buffer. + * + * Wrapped in a `copilot.vfs.prepare_image` span so the external trace + * shows exactly when an image read blocked the request on CPU-heavy + * encode attempts. Attributes record input dimensions, whether a resize + * was needed, how many encode attempts it took, and the final + * dimension/quality chosen. 
+ */ async function prepareImageForVision( buffer: Buffer, claimedType: string ): Promise { - const mediaType = detectImageMime(buffer, claimedType) - - let sharpModule: typeof import('sharp').default - try { - sharpModule = (await import('sharp')).default - } catch (err) { - logger.warn('Failed to load sharp for image preparation', { - mediaType, - error: err instanceof Error ? err.message : String(err), - }) - return buffer.length <= MAX_IMAGE_READ_BYTES ? { buffer, mediaType, resized: false } : null - } - - let metadata: Awaited['metadata']>> - try { - metadata = await sharpModule(buffer, { limitInputPixels: false }).metadata() - } catch (err) { - logger.warn('Failed to read image metadata for VFS read', { - mediaType, - error: err instanceof Error ? err.message : String(err), - }) - return buffer.length <= MAX_IMAGE_READ_BYTES ? { buffer, mediaType, resized: false } : null - } - - const width = metadata.width ?? 0 - const height = metadata.height ?? 0 - const needsResize = - buffer.length > MAX_IMAGE_READ_BYTES || - width > MAX_IMAGE_DIMENSION || - height > MAX_IMAGE_DIMENSION - if (!needsResize) { - return { buffer, mediaType, resized: false } - } - - const hasAlpha = Boolean( - metadata.hasAlpha || - mediaType === 'image/png' || - mediaType === 'image/webp' || - mediaType === 'image/gif' - ) - - for (const dimension of IMAGE_RESIZE_DIMENSIONS) { - for (const quality of IMAGE_QUALITY_STEPS) { + return getVfsTracer().startActiveSpan( + TraceSpan.CopilotVfsPrepareImage, + { + attributes: { + 'copilot.vfs.input.bytes': buffer.length, + 'copilot.vfs.input.media_type_claimed': claimedType, + }, + }, + async (span) => { try { - const pipeline = sharpModule(buffer, { limitInputPixels: false }) - .rotate() - .resize({ - width: dimension, - height: dimension, - fit: 'inside', - withoutEnlargement: true, + const mediaType = detectImageMime(buffer, claimedType) + span.setAttribute('copilot.vfs.input.media_type_detected', mediaType) + + let sharpModule: typeof 
import('sharp').default + try { + sharpModule = (await import('sharp')).default + } catch (err) { + logger.warn('Failed to load sharp for image preparation', { + mediaType, + error: err instanceof Error ? err.message : String(err), }) + span.setAttribute('copilot.vfs.sharp.load_failed', true) + const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES + span.setAttribute( + 'copilot.vfs.outcome', + fitsWithoutSharp ? 'passthrough_no_sharp' : 'rejected_no_sharp', + ) + return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null + } - const transformed = hasAlpha - ? { - buffer: await pipeline - .webp({ quality, alphaQuality: quality, effort: 4 }) - .toBuffer(), - mediaType: 'image/webp', - } - : { - buffer: await pipeline - .jpeg({ quality, mozjpeg: true, chromaSubsampling: '4:4:4' }) - .toBuffer(), - mediaType: 'image/jpeg', - } + let metadata: Awaited['metadata']>> + try { + metadata = await sharpModule(buffer, { limitInputPixels: false }).metadata() + } catch (err) { + logger.warn('Failed to read image metadata for VFS read', { + mediaType, + error: err instanceof Error ? err.message : String(err), + }) + span.setAttribute('copilot.vfs.metadata.failed', true) + const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES + span.setAttribute( + 'copilot.vfs.outcome', + fitsWithoutSharp ? 'passthrough_no_metadata' : 'rejected_no_metadata', + ) + return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null + } - if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { - logger.info('Resized image for VFS read', { - originalBytes: buffer.length, - outputBytes: transformed.buffer.length, - originalWidth: width || undefined, - originalHeight: height || undefined, - maxDimension: dimension, - quality, - originalMediaType: mediaType, - outputMediaType: transformed.mediaType, + const width = metadata.width ?? 0 + const height = metadata.height ?? 
0 + span.setAttributes({ + 'copilot.vfs.input.width': width, + 'copilot.vfs.input.height': height, + }) + + const needsResize = + buffer.length > MAX_IMAGE_READ_BYTES || + width > MAX_IMAGE_DIMENSION || + height > MAX_IMAGE_DIMENSION + if (!needsResize) { + span.setAttributes({ + 'copilot.vfs.resized': false, + 'copilot.vfs.outcome': 'passthrough_fits_budget', + 'copilot.vfs.output.bytes': buffer.length, + 'copilot.vfs.output.media_type': mediaType, }) - return { - buffer: transformed.buffer, - mediaType: transformed.mediaType, - resized: true, + return { buffer, mediaType, resized: false } + } + + const hasAlpha = Boolean( + metadata.hasAlpha || + mediaType === 'image/png' || + mediaType === 'image/webp' || + mediaType === 'image/gif' + ) + span.setAttribute('copilot.vfs.has_alpha', hasAlpha) + + let attempts = 0 + for (const dimension of IMAGE_RESIZE_DIMENSIONS) { + for (const quality of IMAGE_QUALITY_STEPS) { + attempts += 1 + try { + const pipeline = sharpModule(buffer, { limitInputPixels: false }) + .rotate() + .resize({ + width: dimension, + height: dimension, + fit: 'inside', + withoutEnlargement: true, + }) + + const transformed = hasAlpha + ? 
{ + buffer: await pipeline + .webp({ quality, alphaQuality: quality, effort: 4 }) + .toBuffer(), + mediaType: 'image/webp', + } + : { + buffer: await pipeline + .jpeg({ quality, mozjpeg: true, chromaSubsampling: '4:4:4' }) + .toBuffer(), + mediaType: 'image/jpeg', + } + + span.addEvent('copilot.vfs.resize_attempt', { + 'copilot.vfs.resize.dimension': dimension, + 'copilot.vfs.resize.quality': quality, + 'copilot.vfs.resize.output_bytes': transformed.buffer.length, + 'copilot.vfs.resize.fits_budget': + transformed.buffer.length <= MAX_IMAGE_READ_BYTES, + }) + + if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { + logger.info('Resized image for VFS read', { + originalBytes: buffer.length, + outputBytes: transformed.buffer.length, + originalWidth: width || undefined, + originalHeight: height || undefined, + maxDimension: dimension, + quality, + originalMediaType: mediaType, + outputMediaType: transformed.mediaType, + }) + span.setAttributes({ + 'copilot.vfs.resized': true, + 'copilot.vfs.resize.attempts': attempts, + 'copilot.vfs.resize.chosen_dimension': dimension, + 'copilot.vfs.resize.chosen_quality': quality, + 'copilot.vfs.output.bytes': transformed.buffer.length, + 'copilot.vfs.output.media_type': transformed.mediaType, + 'copilot.vfs.outcome': 'resized', + }) + return { + buffer: transformed.buffer, + mediaType: transformed.mediaType, + resized: true, + } + } + } catch (err) { + logger.warn('Failed image resize attempt for VFS read', { + mediaType, + dimension, + quality, + error: err instanceof Error ? err.message : String(err), + }) + span.addEvent('copilot.vfs.resize_attempt_failed', { + 'copilot.vfs.resize.dimension': dimension, + 'copilot.vfs.resize.quality': quality, + 'error.message': + err instanceof Error ? err.message : String(err).slice(0, 500), + }) + } } } - } catch (err) { - logger.warn('Failed image resize attempt for VFS read', { - mediaType, - dimension, - quality, - error: err instanceof Error ? 
err.message : String(err), + + span.setAttributes({ + 'copilot.vfs.resized': false, + 'copilot.vfs.resize.attempts': attempts, + 'copilot.vfs.outcome': 'rejected_too_large_after_resize', }) + return null + } catch (err) { + recordSpanError(span, err) + throw err + } finally { + span.end() } - } - } - - return null + }, + ) } export interface FileReadResult { @@ -169,77 +270,133 @@ export interface FileReadResult { * Read and return the content of a workspace file record. * Handles images (base64 attachment), parseable documents (PDF, DOCX, etc.), * binary files, and plain text with size guards. + * + * Wrapped in `copilot.vfs.read_file` so the parent mothership trace shows + * per-file read latency, the path taken (image / text / parseable / + * binary), and any size rejection. The `prepareImageForVision` span + * nests underneath for the image-resize path. */ export async function readFileRecord(record: WorkspaceFileRecord): Promise { - try { - if (isImageFileType(record.type)) { - const originalBuffer = await downloadWorkspaceFile(record) - const prepared = await prepareImageForVision(originalBuffer, record.type) - if (!prepared) { - return { - content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB after resize/compression)]`, - totalLines: 1, - } - } - const sizeKb = (prepared.buffer.length / 1024).toFixed(1) - const resizeNote = prepared.resized ? 
', resized for vision' : '' - return { - content: `Image: ${record.name} (${sizeKb}KB, ${prepared.mediaType}${resizeNote})`, - totalLines: 1, - attachment: { - type: 'image', - source: { - type: 'base64', - media_type: prepared.mediaType, - data: prepared.buffer.toString('base64'), - }, - }, - } - } + return getVfsTracer().startActiveSpan( + TraceSpan.CopilotVfsReadFile, + { + attributes: { + 'copilot.vfs.file.name': record.name, + 'copilot.vfs.file.media_type': record.type, + 'copilot.vfs.file.size_bytes': record.size, + 'copilot.vfs.file.extension': getExtension(record.name), + }, + }, + async (span) => { + try { + if (isImageFileType(record.type)) { + span.setAttribute('copilot.vfs.read.path', 'image') + const originalBuffer = await downloadWorkspaceFile(record) + const prepared = await prepareImageForVision(originalBuffer, record.type) + if (!prepared) { + span.setAttribute('copilot.vfs.read.outcome', 'image_too_large') + return { + content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB after resize/compression)]`, + totalLines: 1, + } + } + const sizeKb = (prepared.buffer.length / 1024).toFixed(1) + const resizeNote = prepared.resized ? 
', resized for vision' : '' + span.setAttributes({ + 'copilot.vfs.read.outcome': 'image_prepared', + 'copilot.vfs.read.output.bytes': prepared.buffer.length, + 'copilot.vfs.read.output.media_type': prepared.mediaType, + 'copilot.vfs.read.image.resized': prepared.resized, + }) + return { + content: `Image: ${record.name} (${sizeKb}KB, ${prepared.mediaType}${resizeNote})`, + totalLines: 1, + attachment: { + type: 'image', + source: { + type: 'base64', + media_type: prepared.mediaType, + data: prepared.buffer.toString('base64'), + }, + }, + } + } - if (isReadableType(record.type)) { - if (record.size > MAX_TEXT_READ_BYTES) { - return { - content: `[File too large to display inline: ${record.name} (${record.size} bytes, limit ${MAX_TEXT_READ_BYTES})]`, - totalLines: 1, + if (isReadableType(record.type)) { + span.setAttribute('copilot.vfs.read.path', 'text') + if (record.size > MAX_TEXT_READ_BYTES) { + span.setAttribute('copilot.vfs.read.outcome', 'text_too_large') + return { + content: `[File too large to display inline: ${record.name} (${record.size} bytes, limit ${MAX_TEXT_READ_BYTES})]`, + totalLines: 1, + } + } + + const buffer = await downloadWorkspaceFile(record) + const content = buffer.toString('utf-8') + const lines = content.split('\n').length + span.setAttributes({ + 'copilot.vfs.read.outcome': 'text_read', + 'copilot.vfs.read.output.bytes': buffer.length, + 'copilot.vfs.read.output.lines': lines, + }) + return { content, totalLines: lines } } - } - const buffer = await downloadWorkspaceFile(record) - const content = buffer.toString('utf-8') - return { content, totalLines: content.split('\n').length } - } + const ext = getExtension(record.name) + if (PARSEABLE_EXTENSIONS.has(ext)) { + span.setAttribute('copilot.vfs.read.path', 'parseable_document') + const buffer = await downloadWorkspaceFile(record) + try { + const { parseBuffer } = await import('@/lib/file-parsers') + const result = await parseBuffer(buffer, ext) + const content = result.content || '' + 
const lines = content.split('\n').length + span.setAttributes({ + 'copilot.vfs.read.outcome': 'document_parsed', + 'copilot.vfs.read.output.bytes': content.length, + 'copilot.vfs.read.output.lines': lines, + }) + return { content, totalLines: lines } + } catch (parseErr) { + logger.warn('Failed to parse document', { + fileName: record.name, + ext, + error: parseErr instanceof Error ? parseErr.message : String(parseErr), + }) + span.addEvent('copilot.vfs.parse_failed', { + 'error.message': + parseErr instanceof Error + ? parseErr.message + : String(parseErr).slice(0, 500), + }) + span.setAttribute('copilot.vfs.read.outcome', 'parse_failed') + return { + content: `[Could not parse ${record.name} (${record.type}, ${record.size} bytes)]`, + totalLines: 1, + } + } + } - const ext = getExtension(record.name) - if (PARSEABLE_EXTENSIONS.has(ext)) { - const buffer = await downloadWorkspaceFile(record) - try { - const { parseBuffer } = await import('@/lib/file-parsers') - const result = await parseBuffer(buffer, ext) - const content = result.content || '' - return { content, totalLines: content.split('\n').length } - } catch (parseErr) { - logger.warn('Failed to parse document', { - fileName: record.name, - ext, - error: parseErr instanceof Error ? parseErr.message : String(parseErr), + span.setAttributes({ + 'copilot.vfs.read.path': 'binary', + 'copilot.vfs.read.outcome': 'binary_placeholder', }) return { - content: `[Could not parse ${record.name} (${record.type}, ${record.size} bytes)]`, + content: `[Binary file: ${record.name} (${record.type}, ${record.size} bytes). Cannot display as text.]`, totalLines: 1, } + } catch (err) { + logger.warn('Failed to read workspace file', { + fileName: record.name, + error: err instanceof Error ? 
err.message : String(err), + }) + recordSpanError(span, err) + span.setAttribute('copilot.vfs.read.outcome', 'read_failed') + return null + } finally { + span.end() } - } - - return { - content: `[Binary file: ${record.name} (${record.type}, ${record.size} bytes). Cannot display as text.]`, - totalLines: 1, - } - } catch (err) { - logger.warn('Failed to read workspace file', { - fileName: record.name, - error: err instanceof Error ? err.message : String(err), - }) - return null - } + }, + ) } diff --git a/package.json b/package.json index d78396fbb5c..fc3439f62a6 100644 --- a/package.json +++ b/package.json @@ -27,8 +27,10 @@ "mship-tools:check": "bun run scripts/sync-tool-catalog.ts --check", "trace-contracts:generate": "bun run scripts/sync-request-trace-contract.ts", "trace-contracts:check": "bun run scripts/sync-request-trace-contract.ts --check", - "mship:generate": "bun run mship-contracts:generate && bun run mship-tools:generate && bun run trace-contracts:generate", - "mship:check": "bun run mship-contracts:check && bun run mship-tools:check && bun run trace-contracts:check", + "trace-spans-contract:generate": "bun run scripts/sync-trace-spans-contract.ts", + "trace-spans-contract:check": "bun run scripts/sync-trace-spans-contract.ts --check", + "mship:generate": "bun run mship-contracts:generate && bun run mship-tools:generate && bun run trace-contracts:generate && bun run trace-spans-contract:generate", + "mship:check": "bun run mship-contracts:check && bun run mship-tools:check && bun run trace-contracts:check && bun run trace-spans-contract:check", "prepare": "bun husky", "type-check": "turbo run type-check", "release": "bun run scripts/create-single-release.ts" diff --git a/scripts/sync-trace-spans-contract.ts b/scripts/sync-trace-spans-contract.ts new file mode 100644 index 00000000000..b3495753f6c --- /dev/null +++ b/scripts/sync-trace-spans-contract.ts @@ -0,0 +1,155 @@ +import { mkdir, readFile, writeFile } from 'node:fs/promises' +import { dirname, 
resolve } from 'node:path' +import { fileURLToPath } from 'node:url' + +/** + * Generate `apps/sim/lib/copilot/generated/trace-spans-v1.ts` from the + * Go-side `contracts/trace-spans-v1.schema.json` contract. + * + * The contract is a single-enum JSON Schema. We emit: + * - A `TraceSpansV1Name` const object (key-as-value) for ergonomic + * access: `TraceSpansV1Name['copilot.vfs.read_file']`. + * - A `TraceSpansV1NameValue` union type. + * - A sorted `TraceSpansV1Names` readonly array (useful for tests that + * verify coverage, and for tooling that wants to enumerate names). + * + * We deliberately do NOT pass through `json-schema-to-typescript` — + * it would generate a noisy `TraceSpansV1` object type for the wrapper + * that drives reflection; the wrapper type has no runtime use on the Sim + * side and would obscure the actual enum. + */ +const SCRIPT_DIR = dirname(fileURLToPath(import.meta.url)) +const ROOT = resolve(SCRIPT_DIR, '..') +const DEFAULT_CONTRACT_PATH = resolve( + ROOT, + '../copilot/copilot/contracts/trace-spans-v1.schema.json', +) +const OUTPUT_PATH = resolve( + ROOT, + 'apps/sim/lib/copilot/generated/trace-spans-v1.ts', +) + +function extractSpanNames(schema: Record): string[] { + const defs = (schema.$defs ?? 
{}) as Record + const nameDef = defs.TraceSpansV1Name + if ( + !nameDef || + typeof nameDef !== 'object' || + !Array.isArray((nameDef as Record).enum) + ) { + throw new Error( + 'trace-spans-v1.schema.json is missing $defs.TraceSpansV1Name.enum', + ) + } + const enumValues = (nameDef as Record).enum as unknown[] + if (!enumValues.every((v) => typeof v === 'string')) { + throw new Error('TraceSpansV1Name enum must be string-only') + } + return (enumValues as string[]).slice().sort() +} + +/** + * Convert a wire name like "copilot.recovery.check_replay_gap" into an + * identifier-safe PascalCase key like "CopilotRecoveryCheckReplayGap", + * so call sites read as `TraceSpan.CopilotRecoveryCheckReplayGap` + * instead of `TraceSpan["copilot.recovery.check_replay_gap"]`. + * + * Splits on `.`, `_`, and non-alphanumeric characters; capitalizes each + * part; collapses. Strict mapping (not a best-effort heuristic), so the + * same input always produces the same identifier. + */ +function toIdentifier(name: string): string { + const parts = name.split(/[^A-Za-z0-9]+/).filter(Boolean) + if (parts.length === 0) { + throw new Error(`Cannot derive identifier for span name: ${name}`) + } + const ident = parts + .map((p) => p.charAt(0).toUpperCase() + p.slice(1).toLowerCase()) + .join('') + // Safety: identifiers may not start with a digit. + if (/^[0-9]/.test(ident)) { + throw new Error( + `Derived identifier "${ident}" for span "${name}" starts with a digit`, + ) + } + return ident +} + +function render(spanNames: string[]): string { + const pairs = spanNames.map((name) => ({ name, ident: toIdentifier(name) })) + + // Guard against collisions: if two wire names ever collapse to the + // same PascalCase identifier, we want a clear build failure, not a + // silent override. 
+ const seen = new Map() + for (const p of pairs) { + const prev = seen.get(p.ident) + if (prev && prev !== p.name) { + throw new Error( + `Identifier collision: "${prev}" and "${p.name}" both map to "${p.ident}"`, + ) + } + seen.set(p.ident, p.name) + } + + const constLines = pairs + .map((p) => ` ${p.ident}: ${JSON.stringify(p.name)},`) + .join('\n') + const arrayEntries = spanNames.map((n) => ` ${JSON.stringify(n)},`).join('\n') + + return `// AUTO-GENERATED FILE. DO NOT EDIT. +// +// Source: copilot/copilot/contracts/trace-spans-v1.schema.json +// Regenerate with: bun run trace-spans-contract:generate +// +// Canonical mothership OTel span names. Call sites should reference +// \`TraceSpan.\` (e.g. \`TraceSpan.CopilotVfsReadFile\`) +// rather than raw string literals, so the Go-side contract is the +// single source of truth and typos become compile errors. + +export const TraceSpan = { +${constLines} +} as const; + +export type TraceSpanKey = keyof typeof TraceSpan; +export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey]; + +/** Readonly sorted list of every canonical span name. */ +export const TraceSpanValues: readonly TraceSpanValue[] = [ +${arrayEntries} +] as const; +` +} + +async function main() { + const checkOnly = process.argv.includes('--check') + const inputArg = process.argv.find((a) => a.startsWith('--input=')) + const inputPath = inputArg + ? resolve(ROOT, inputArg.slice('--input='.length)) + : DEFAULT_CONTRACT_PATH + + const raw = await readFile(inputPath, 'utf8') + const schema = JSON.parse(raw) + const spanNames = extractSpanNames(schema) + const rendered = render(spanNames) + + if (checkOnly) { + const existing = await readFile(OUTPUT_PATH, 'utf8').catch(() => null) + if (existing !== rendered) { + throw new Error( + 'Generated trace spans contract is stale. 
Run: bun run trace-spans-contract:generate', + ) + } + console.log('Trace spans contract is up to date.') + return + } + + await mkdir(dirname(OUTPUT_PATH), { recursive: true }) + await writeFile(OUTPUT_PATH, rendered, 'utf8') + console.log(`Generated trace spans types -> ${OUTPUT_PATH}`) +} + +main().catch((err) => { + console.error(err) + process.exit(1) +}) From 62a351a0afae6a17242dc48b118b0aa6b596cea3 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 16 Apr 2026 15:15:20 -0700 Subject: [PATCH 03/10] Fix --- apps/sim/app/api/billing/update-cost/route.ts | 4 +- .../api/copilot/api-keys/validate/route.ts | 2 +- .../app/api/copilot/chat/stream/route.test.ts | 218 +- apps/sim/app/api/copilot/chat/stream/route.ts | 547 ++- apps/sim/app/api/mcp/copilot/route.ts | 685 ++-- apps/sim/instrumentation-node.ts | 10 +- apps/sim/lib/copilot/async-runs/repository.ts | 32 +- apps/sim/lib/copilot/chat/post.ts | 767 ++--- apps/sim/lib/copilot/chat/terminal-state.ts | 4 +- .../generated/mothership-stream-v1-schema.ts | 3006 +++++++---------- .../copilot/generated/mothership-stream-v1.ts | 746 ++-- .../lib/copilot/generated/trace-spans-v1.ts | 184 +- apps/sim/lib/copilot/request/go/fetch.test.ts | 132 +- apps/sim/lib/copilot/request/go/fetch.ts | 97 +- .../sim/lib/copilot/request/go/propagation.ts | 32 +- .../sim/lib/copilot/request/go/stream.test.ts | 386 +-- apps/sim/lib/copilot/request/go/stream.ts | 361 +- apps/sim/lib/copilot/request/handlers/span.ts | 28 +- .../lib/copilot/request/lifecycle/finalize.ts | 5 +- .../request/lifecycle/headless.test.ts | 266 +- .../lib/copilot/request/lifecycle/headless.ts | 79 +- apps/sim/lib/copilot/request/lifecycle/run.ts | 513 ++- .../copilot/request/lifecycle/start.test.ts | 270 +- .../lib/copilot/request/lifecycle/start.ts | 370 +- apps/sim/lib/copilot/request/otel.ts | 261 +- .../copilot/request/session/contract.test.ts | 236 +- .../lib/copilot/request/session/contract.ts | 442 ++- .../copilot/request/session/explicit-abort.ts | 
76 +- .../copilot/request/session/recovery.test.ts | 36 +- .../lib/copilot/request/session/recovery.ts | 101 +- apps/sim/lib/copilot/request/subagent.ts | 242 +- .../sim/lib/copilot/request/tools/executor.ts | 35 +- apps/sim/lib/copilot/request/tools/files.ts | 2 +- .../lib/copilot/request/tools/resources.ts | 8 +- apps/sim/lib/copilot/request/tools/tables.ts | 10 +- apps/sim/lib/copilot/request/trace.ts | 100 +- apps/sim/lib/copilot/request/types.ts | 2 +- apps/sim/lib/copilot/vfs/file-reader.test.ts | 4 +- apps/sim/lib/copilot/vfs/file-reader.ts | 332 +- 39 files changed, 4820 insertions(+), 5811 deletions(-) diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index c406c5981d8..6147f27e607 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -42,13 +42,13 @@ export async function POST(req: NextRequest) { 'http.method': 'POST', 'http.route': '/api/billing/update-cost', }, - async (span) => updateCostInner(req, span), + async (span) => updateCostInner(req, span) ) } async function updateCostInner( req: NextRequest, - span: import('@opentelemetry/api').Span, + span: import('@opentelemetry/api').Span ): Promise { const requestId = generateRequestId() const startTime = Date.now() diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index bd9d751819d..286ab7ff9ec 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -93,6 +93,6 @@ export async function POST(req: NextRequest) { span.setAttribute('http.status_code', 500) return NextResponse.json({ error: 'Failed to validate usage' }, { status: 500 }) } - }, + } ) } diff --git a/apps/sim/app/api/copilot/chat/stream/route.test.ts b/apps/sim/app/api/copilot/chat/stream/route.test.ts index 8b7801754e1..7c99617e592 100644 --- 
a/apps/sim/app/api/copilot/chat/stream/route.test.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.test.ts @@ -2,12 +2,12 @@ * @vitest-environment node */ -import { NextRequest } from "next/server"; -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { NextRequest } from 'next/server' +import { beforeEach, describe, expect, it, vi } from 'vitest' import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from "@/lib/copilot/generated/mothership-stream-v1"; +} from '@/lib/copilot/generated/mothership-stream-v1' const { getLatestRunForStream, @@ -21,13 +21,13 @@ const { readFilePreviewSessions: vi.fn(), checkForReplayGap: vi.fn(), authenticateCopilotRequestSessionOnly: vi.fn(), -})); +})) -vi.mock("@/lib/copilot/async-runs/repository", () => ({ +vi.mock('@/lib/copilot/async-runs/repository', () => ({ getLatestRunForStream, -})); +})) -vi.mock("@/lib/copilot/request/session", () => ({ +vi.mock('@/lib/copilot/request/session', () => ({ readEvents, readFilePreviewSessions, checkForReplayGap, @@ -37,180 +37,172 @@ vi.mock("@/lib/copilot/request/session", () => ({ cursor: event.cursor, }, seq: event.seq, - trace: { requestId: event.requestId ?? "" }, + trace: { requestId: event.requestId ?? 
'' }, type: event.type, payload: event.payload, }), encodeSSEEnvelope: (event: Record) => new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`), SSE_RESPONSE_HEADERS: { - "Content-Type": "text/event-stream", + 'Content-Type': 'text/event-stream', }, -})); +})) -vi.mock("@/lib/copilot/request/http", () => ({ +vi.mock('@/lib/copilot/request/http', () => ({ authenticateCopilotRequestSessionOnly, -})); +})) -import { GET } from "./route"; +import { GET } from './route' async function readAllChunks(response: Response): Promise { - const reader = response.body?.getReader(); - expect(reader).toBeTruthy(); + const reader = response.body?.getReader() + expect(reader).toBeTruthy() - const chunks: string[] = []; + const chunks: string[] = [] while (true) { - const { done, value } = await reader!.read(); + const { done, value } = await reader!.read() if (done) { - break; + break } - chunks.push(new TextDecoder().decode(value)); + chunks.push(new TextDecoder().decode(value)) } - return chunks; + return chunks } -describe("copilot chat stream replay route", () => { +describe('copilot chat stream replay route', () => { beforeEach(() => { - vi.clearAllMocks(); + vi.clearAllMocks() authenticateCopilotRequestSessionOnly.mockResolvedValue({ - userId: "user-1", + userId: 'user-1', isAuthenticated: true, - }); - readEvents.mockResolvedValue([]); - readFilePreviewSessions.mockResolvedValue([]); - checkForReplayGap.mockResolvedValue(null); - }); + }) + readEvents.mockResolvedValue([]) + readFilePreviewSessions.mockResolvedValue([]) + checkForReplayGap.mockResolvedValue(null) + }) - it("returns preview sessions in batch mode", async () => { + it('returns preview sessions in batch mode', async () => { getLatestRunForStream.mockResolvedValue({ - status: "active", - executionId: "exec-1", - id: "run-1", - }); + status: 'active', + executionId: 'exec-1', + id: 'run-1', + }) readFilePreviewSessions.mockResolvedValue([ { schemaVersion: 1, - id: "preview-1", - streamId: "stream-1", - 
toolCallId: "preview-1", - status: "streaming", - fileName: "draft.md", - previewText: "hello", + id: 'preview-1', + streamId: 'stream-1', + toolCallId: 'preview-1', + status: 'streaming', + fileName: 'draft.md', + previewText: 'hello', previewVersion: 2, - updatedAt: "2026-04-10T00:00:00.000Z", + updatedAt: '2026-04-10T00:00:00.000Z', }, - ]); + ]) const response = await GET( new NextRequest( - "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0&batch=true", - ), - ); + 'http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0&batch=true' + ) + ) - expect(response.status).toBe(200); + expect(response.status).toBe(200) await expect(response.json()).resolves.toMatchObject({ success: true, previewSessions: [ expect.objectContaining({ - id: "preview-1", - previewText: "hello", + id: 'preview-1', + previewText: 'hello', previewVersion: 2, }), ], - status: "active", - }); - }); + status: 'active', + }) + }) - it("stops replay polling when run becomes cancelled", async () => { + it('stops replay polling when run becomes cancelled', async () => { getLatestRunForStream .mockResolvedValueOnce({ - status: "active", - executionId: "exec-1", - id: "run-1", + status: 'active', + executionId: 'exec-1', + id: 'run-1', }) .mockResolvedValueOnce({ - status: "cancelled", - executionId: "exec-1", - id: "run-1", - }); + status: 'cancelled', + executionId: 'exec-1', + id: 'run-1', + }) const response = await GET( - new NextRequest( - "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", - ), - ); + new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0') + ) - const chunks = await readAllChunks(response); - expect(chunks.join("")).toContain( + const chunks = await readAllChunks(response) + expect(chunks.join('')).toContain( JSON.stringify({ status: MothershipStreamV1CompletionStatus.cancelled, - reason: "terminal_status", - }), - ); - expect(getLatestRunForStream).toHaveBeenCalledTimes(2); - }); 
+ reason: 'terminal_status', + }) + ) + expect(getLatestRunForStream).toHaveBeenCalledTimes(2) + }) - it("emits structured terminal replay error when run metadata disappears", async () => { + it('emits structured terminal replay error when run metadata disappears', async () => { getLatestRunForStream .mockResolvedValueOnce({ - status: "active", - executionId: "exec-1", - id: "run-1", + status: 'active', + executionId: 'exec-1', + id: 'run-1', }) - .mockResolvedValueOnce(null); + .mockResolvedValueOnce(null) const response = await GET( - new NextRequest( - "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", - ), - ); - - const chunks = await readAllChunks(response); - const body = chunks.join(""); - expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`); - expect(body).toContain('"code":"resume_run_unavailable"'); - expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`); - }); - - it("uses the latest live request id for synthetic terminal replay events", async () => { + new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0') + ) + + const chunks = await readAllChunks(response) + const body = chunks.join('') + expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`) + expect(body).toContain('"code":"resume_run_unavailable"') + expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`) + }) + + it('uses the latest live request id for synthetic terminal replay events', async () => { getLatestRunForStream .mockResolvedValueOnce({ - status: "active", - executionId: "exec-1", - id: "run-1", + status: 'active', + executionId: 'exec-1', + id: 'run-1', }) .mockResolvedValueOnce({ - status: "cancelled", - executionId: "exec-1", - id: "run-1", - }); + status: 'cancelled', + executionId: 'exec-1', + id: 'run-1', + }) readEvents .mockResolvedValueOnce([ { - stream: { streamId: "stream-1", cursor: "1" }, + stream: { streamId: 'stream-1', cursor: '1' }, 
seq: 1, - trace: { requestId: "req-live-123" }, + trace: { requestId: 'req-live-123' }, type: MothershipStreamV1EventType.text, payload: { - channel: "assistant", - text: "hello", + channel: 'assistant', + text: 'hello', }, }, ]) - .mockResolvedValueOnce([]); + .mockResolvedValueOnce([]) const response = await GET( - new NextRequest( - "http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0", - ), - ); - - const chunks = await readAllChunks(response); - const terminalChunk = chunks[chunks.length - 1] ?? ""; - expect(terminalChunk).toContain( - `"type":"${MothershipStreamV1EventType.complete}"`, - ); - expect(terminalChunk).toContain('"requestId":"req-live-123"'); - expect(terminalChunk).toContain('"status":"cancelled"'); - }); -}); + new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0') + ) + + const chunks = await readAllChunks(response) + const terminalChunk = chunks[chunks.length - 1] ?? '' + expect(terminalChunk).toContain(`"type":"${MothershipStreamV1EventType.complete}"`) + expect(terminalChunk).toContain('"requestId":"req-live-123"') + expect(terminalChunk).toContain('"status":"cancelled"') + }) +}) diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index c8a34f160a6..54502269f15 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -1,17 +1,13 @@ -import { - context as otelContext, - SpanStatusCode, - trace, -} from "@opentelemetry/api"; -import { createLogger } from "@sim/logger"; -import { type NextRequest, NextResponse } from "next/server"; -import { getLatestRunForStream } from "@/lib/copilot/async-runs/repository"; +import { context as otelContext, SpanStatusCode, trace } from '@opentelemetry/api' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' import { 
MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import { authenticateCopilotRequestSessionOnly } from "@/lib/copilot/request/http"; -import { getCopilotTracer } from "@/lib/copilot/request/otel"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' +import { getCopilotTracer } from '@/lib/copilot/request/otel' import { checkForReplayGap, createEvent, @@ -19,65 +15,57 @@ import { readEvents, readFilePreviewSessions, SSE_RESPONSE_HEADERS, -} from "@/lib/copilot/request/session"; -import { toStreamBatchEvent } from "@/lib/copilot/request/session/types"; +} from '@/lib/copilot/request/session' +import { toStreamBatchEvent } from '@/lib/copilot/request/session/types' -export const maxDuration = 3600; +export const maxDuration = 3600 -const logger = createLogger("CopilotChatStreamAPI"); -const POLL_INTERVAL_MS = 250; -const MAX_STREAM_MS = 60 * 60 * 1000; +const logger = createLogger('CopilotChatStreamAPI') +const POLL_INTERVAL_MS = 250 +const MAX_STREAM_MS = 60 * 60 * 1000 function extractCanonicalRequestId(value: unknown): string { - return typeof value === "string" && value.length > 0 ? value : ""; + return typeof value === 'string' && value.length > 0 ? 
value : '' } -function extractRunRequestId( - run: { requestContext?: unknown } | null | undefined, -): string { - if ( - !run || - typeof run.requestContext !== "object" || - run.requestContext === null - ) { - return ""; +function extractRunRequestId(run: { requestContext?: unknown } | null | undefined): string { + if (!run || typeof run.requestContext !== 'object' || run.requestContext === null) { + return '' } - const requestContext = run.requestContext as Record; + const requestContext = run.requestContext as Record return ( extractCanonicalRequestId(requestContext.requestId) || extractCanonicalRequestId(requestContext.simRequestId) - ); + ) } -function extractEnvelopeRequestId(envelope: { - trace?: { requestId?: unknown }; -}): string { - return extractCanonicalRequestId(envelope.trace?.requestId); +function extractEnvelopeRequestId(envelope: { trace?: { requestId?: unknown } }): string { + return extractCanonicalRequestId(envelope.trace?.requestId) } function isTerminalStatus( - status: string | null | undefined, + status: string | null | undefined ): status is MothershipStreamV1CompletionStatus { return ( status === MothershipStreamV1CompletionStatus.complete || status === MothershipStreamV1CompletionStatus.error || status === MothershipStreamV1CompletionStatus.cancelled - ); + ) } function buildResumeTerminalEnvelopes(options: { - streamId: string; - afterCursor: string; - status: MothershipStreamV1CompletionStatus; - message?: string; - code: string; - reason?: string; - requestId?: string; + streamId: string + afterCursor: string + status: MothershipStreamV1CompletionStatus + message?: string + code: string + reason?: string + requestId?: string }) { - const baseSeq = Number(options.afterCursor || "0"); - const seq = Number.isFinite(baseSeq) ? baseSeq : 0; - const envelopes: ReturnType[] = []; - const rid = options.requestId ?? ""; + const baseSeq = Number(options.afterCursor || '0') + const seq = Number.isFinite(baseSeq) ? 
baseSeq : 0 + const envelopes: ReturnType[] = [] + const rid = options.requestId ?? '' if (options.status === MothershipStreamV1CompletionStatus.error) { envelopes.push( @@ -88,12 +76,11 @@ function buildResumeTerminalEnvelopes(options: { requestId: rid, type: MothershipStreamV1EventType.error, payload: { - message: - options.message || "Stream recovery failed before completion.", + message: options.message || 'Stream recovery failed before completion.', code: options.code, }, - }), - ); + }) + ) } envelopes.push( @@ -107,30 +94,27 @@ function buildResumeTerminalEnvelopes(options: { status: options.status, ...(options.reason ? { reason: options.reason } : {}), }, - }), - ); + }) + ) - return envelopes; + return envelopes } export async function GET(request: NextRequest) { const { userId: authenticatedUserId, isAuthenticated } = - await authenticateCopilotRequestSessionOnly(); + await authenticateCopilotRequestSessionOnly() if (!isAuthenticated || !authenticatedUserId) { - return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) } - const url = new URL(request.url); - const streamId = url.searchParams.get("streamId") || ""; - const afterCursor = url.searchParams.get("after") || ""; - const batchMode = url.searchParams.get("batch") === "true"; + const url = new URL(request.url) + const streamId = url.searchParams.get('streamId') || '' + const afterCursor = url.searchParams.get('after') || '' + const batchMode = url.searchParams.get('batch') === 'true' if (!streamId) { - return NextResponse.json( - { error: "streamId is required" }, - { status: 400 }, - ); + return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) } // Root span for the whole resume/reconnect request. 
In stream mode the @@ -140,15 +124,15 @@ export async function GET(request: NextRequest) { // manually, capture its context, and re-enter that context inside the // stream callback so every nested `withCopilotSpan` / `withDbSpan` call // attaches to this root. - const rootSpan = getCopilotTracer().startSpan("copilot.resume.request", { + const rootSpan = getCopilotTracer().startSpan('copilot.resume.request', { attributes: { - "copilot.transport": batchMode ? "batch" : "stream", - "stream.id": streamId, - "user.id": authenticatedUserId, - "copilot.resume.after_cursor": afterCursor || "0", + 'copilot.transport': batchMode ? 'batch' : 'stream', + 'stream.id': streamId, + 'user.id': authenticatedUserId, + 'copilot.resume.after_cursor': afterCursor || '0', }, - }); - const rootContext = trace.setSpan(otelContext.active(), rootSpan); + }) + const rootContext = trace.setSpan(otelContext.active(), rootSpan) try { return await otelContext.with(rootContext, () => @@ -160,16 +144,16 @@ export async function GET(request: NextRequest) { authenticatedUserId, rootSpan, rootContext, - }), - ); + }) + ) } catch (err) { rootSpan.setStatus({ code: SpanStatusCode.ERROR, message: err instanceof Error ? err.message : String(err), - }); - rootSpan.recordException(err instanceof Error ? err : new Error(String(err))); - rootSpan.end(); - throw err; + }) + rootSpan.recordException(err instanceof Error ? 
err : new Error(String(err))) + rootSpan.end() + throw err } } @@ -182,274 +166,261 @@ async function handleResumeRequestBody({ rootSpan, rootContext, }: { - request: NextRequest; - streamId: string; - afterCursor: string; - batchMode: boolean; - authenticatedUserId: string; - rootSpan: import("@opentelemetry/api").Span; - rootContext: import("@opentelemetry/api").Context; + request: NextRequest + streamId: string + afterCursor: string + batchMode: boolean + authenticatedUserId: string + rootSpan: import('@opentelemetry/api').Span + rootContext: import('@opentelemetry/api').Context }) { - - const run = await getLatestRunForStream(streamId, authenticatedUserId).catch( - (err) => { - logger.warn("Failed to fetch latest run for stream", { - streamId, - error: err instanceof Error ? err.message : String(err), - }); - return null; - }, - ); - logger.info("[Resume] Stream lookup", { + const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => { + logger.warn('Failed to fetch latest run for stream', { + streamId, + error: err instanceof Error ? 
err.message : String(err), + }) + return null + }) + logger.info('[Resume] Stream lookup', { streamId, afterCursor, batchMode, hasRun: !!run, runStatus: run?.status, - }); + }) if (!run) { - rootSpan.setAttribute("copilot.resume.outcome", "stream_not_found"); - rootSpan.end(); - return NextResponse.json({ error: "Stream not found" }, { status: 404 }); + rootSpan.setAttribute('copilot.resume.outcome', 'stream_not_found') + rootSpan.end() + return NextResponse.json({ error: 'Stream not found' }, { status: 404 }) } - rootSpan.setAttribute("copilot.run.status", run.status); + rootSpan.setAttribute('copilot.run.status', run.status) if (batchMode) { - const afterSeq = afterCursor || "0"; + const afterSeq = afterCursor || '0' const [events, previewSessions] = await Promise.all([ readEvents(streamId, afterSeq), readFilePreviewSessions(streamId).catch((error) => { - logger.warn("Failed to read preview sessions for stream batch", { + logger.warn('Failed to read preview sessions for stream batch', { streamId, error: error instanceof Error ? 
error.message : String(error), - }); - return []; + }) + return [] }), - ]); - const batchEvents = events.map(toStreamBatchEvent); - logger.info("[Resume] Batch response", { + ]) + const batchEvents = events.map(toStreamBatchEvent) + logger.info('[Resume] Batch response', { streamId, afterCursor: afterSeq, eventCount: batchEvents.length, previewSessionCount: previewSessions.length, runStatus: run.status, - }); + }) rootSpan.setAttributes({ - "copilot.resume.outcome": "batch_delivered", - "copilot.resume.event_count": batchEvents.length, - "copilot.resume.preview_session_count": previewSessions.length, - }); - rootSpan.end(); + 'copilot.resume.outcome': 'batch_delivered', + 'copilot.resume.event_count': batchEvents.length, + 'copilot.resume.preview_session_count': previewSessions.length, + }) + rootSpan.end() return NextResponse.json({ success: true, events: batchEvents, previewSessions, status: run.status, - }); + }) } - const startTime = Date.now(); - let totalEventsFlushed = 0; - let pollIterations = 0; + const startTime = Date.now() + let totalEventsFlushed = 0 + let pollIterations = 0 const stream = new ReadableStream({ async start(controller) { // Re-enter the root OTel context so any `withCopilotSpan` call below // (inside flushEvents/checkForReplayGap/etc.) parents under // copilot.resume.request instead of becoming an orphan. 
- return otelContext.with(rootContext, () => startInner(controller)); + return otelContext.with(rootContext, () => startInner(controller)) }, - }); + }) async function startInner(controller: ReadableStreamDefaultController) { - let cursor = afterCursor || "0"; - let controllerClosed = false; - let sawTerminalEvent = false; - let currentRequestId = extractRunRequestId(run); - - const closeController = () => { - if (controllerClosed) return; - controllerClosed = true; - try { - controller.close(); - } catch { - // Controller already closed by runtime/client - } - }; - - const enqueueEvent = (payload: unknown) => { - if (controllerClosed) return false; - try { - controller.enqueue(encodeSSEEnvelope(payload)); - return true; - } catch { - controllerClosed = true; - return false; - } - }; - - const abortListener = () => { - controllerClosed = true; - }; - request.signal.addEventListener("abort", abortListener, { once: true }); - - const flushEvents = async () => { - const events = await readEvents(streamId, cursor); - if (events.length > 0) { - totalEventsFlushed += events.length; - logger.info("[Resume] Flushing events", { - streamId, - afterCursor: cursor, - eventCount: events.length, - }); + let cursor = afterCursor || '0' + let controllerClosed = false + let sawTerminalEvent = false + let currentRequestId = extractRunRequestId(run) + + const closeController = () => { + if (controllerClosed) return + controllerClosed = true + try { + controller.close() + } catch { + // Controller already closed by runtime/client + } + } + + const enqueueEvent = (payload: unknown) => { + if (controllerClosed) return false + try { + controller.enqueue(encodeSSEEnvelope(payload)) + return true + } catch { + controllerClosed = true + return false + } + } + + const abortListener = () => { + controllerClosed = true + } + request.signal.addEventListener('abort', abortListener, { once: true }) + + const flushEvents = async () => { + const events = await readEvents(streamId, cursor) + if 
(events.length > 0) { + totalEventsFlushed += events.length + logger.info('[Resume] Flushing events', { + streamId, + afterCursor: cursor, + eventCount: events.length, + }) + } + for (const envelope of events) { + cursor = envelope.stream.cursor ?? String(envelope.seq) + currentRequestId = extractEnvelopeRequestId(envelope) || currentRequestId + if (envelope.type === MothershipStreamV1EventType.complete) { + sawTerminalEvent = true } - for (const envelope of events) { - cursor = envelope.stream.cursor ?? String(envelope.seq); - currentRequestId = - extractEnvelopeRequestId(envelope) || currentRequestId; - if (envelope.type === MothershipStreamV1EventType.complete) { - sawTerminalEvent = true; - } - if (!enqueueEvent(envelope)) { - break; - } + if (!enqueueEvent(envelope)) { + break } - }; - - const emitTerminalIfMissing = ( - status: MothershipStreamV1CompletionStatus, - options?: { message?: string; code: string; reason?: string }, - ) => { - if (controllerClosed || sawTerminalEvent) { - return; + } + } + + const emitTerminalIfMissing = ( + status: MothershipStreamV1CompletionStatus, + options?: { message?: string; code: string; reason?: string } + ) => { + if (controllerClosed || sawTerminalEvent) { + return + } + for (const envelope of buildResumeTerminalEnvelopes({ + streamId, + afterCursor: cursor, + status, + message: options?.message, + code: options?.code ?? 'resume_terminal', + reason: options?.reason, + requestId: currentRequestId, + })) { + cursor = envelope.stream.cursor ?? String(envelope.seq) + if (envelope.type === MothershipStreamV1EventType.complete) { + sawTerminalEvent = true } - for (const envelope of buildResumeTerminalEnvelopes({ - streamId, - afterCursor: cursor, - status, - message: options?.message, - code: options?.code ?? "resume_terminal", - reason: options?.reason, - requestId: currentRequestId, - })) { - cursor = envelope.stream.cursor ?? 
String(envelope.seq); - if (envelope.type === MothershipStreamV1EventType.complete) { - sawTerminalEvent = true; - } - if (!enqueueEvent(envelope)) { - break; - } + if (!enqueueEvent(envelope)) { + break } - }; + } + } - try { - const gap = await checkForReplayGap( - streamId, - afterCursor, - currentRequestId, - ); - if (gap) { - for (const envelope of gap.envelopes) { - enqueueEvent(envelope); - } - return; + try { + const gap = await checkForReplayGap(streamId, afterCursor, currentRequestId) + if (gap) { + for (const envelope of gap.envelopes) { + enqueueEvent(envelope) } + return + } - await flushEvents(); + await flushEvents() - while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) { - pollIterations += 1; - const currentRun = await getLatestRunForStream( - streamId, - authenticatedUserId, - ).catch((err) => { - logger.warn("Failed to poll latest run for stream", { + while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) { + pollIterations += 1 + const currentRun = await getLatestRunForStream(streamId, authenticatedUserId).catch( + (err) => { + logger.warn('Failed to poll latest run for stream', { streamId, error: err instanceof Error ? 
err.message : String(err), - }); - return null; - }); - if (!currentRun) { - emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: - "The stream could not be recovered because its run metadata is unavailable.", - code: "resume_run_unavailable", - reason: "run_unavailable", - }); - break; + }) + return null } + ) + if (!currentRun) { + emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { + message: 'The stream could not be recovered because its run metadata is unavailable.', + code: 'resume_run_unavailable', + reason: 'run_unavailable', + }) + break + } - currentRequestId = - extractRunRequestId(currentRun) || currentRequestId; + currentRequestId = extractRunRequestId(currentRun) || currentRequestId - await flushEvents(); + await flushEvents() - if (controllerClosed) { - break; - } - if (isTerminalStatus(currentRun.status)) { - emitTerminalIfMissing(currentRun.status, { - message: - currentRun.status === MothershipStreamV1CompletionStatus.error - ? typeof currentRun.error === "string" - ? currentRun.error - : "The recovered stream ended with an error." - : undefined, - code: "resume_terminal_status", - reason: "terminal_status", - }); - break; - } - - if (request.signal.aborted) { - controllerClosed = true; - break; - } - - await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)); + if (controllerClosed) { + break } - if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) { - emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: "The stream recovery timed out before completion.", - code: "resume_timeout", - reason: "timeout", - }); + if (isTerminalStatus(currentRun.status)) { + emitTerminalIfMissing(currentRun.status, { + message: + currentRun.status === MothershipStreamV1CompletionStatus.error + ? typeof currentRun.error === 'string' + ? currentRun.error + : 'The recovered stream ended with an error.' 
+ : undefined, + code: 'resume_terminal_status', + reason: 'terminal_status', + }) + break } - } catch (error) { - if (!controllerClosed && !request.signal.aborted) { - logger.warn("Stream replay failed", { - streamId, - error: error instanceof Error ? error.message : String(error), - }); - emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { - message: "The stream replay failed before completion.", - code: "resume_internal", - reason: "stream_replay_failed", - }); + + if (request.signal.aborted) { + controllerClosed = true + break } - rootSpan.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }); - rootSpan.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - } finally { - request.signal.removeEventListener("abort", abortListener); - closeController(); - rootSpan.setAttributes({ - "copilot.resume.outcome": sawTerminalEvent - ? "terminal_delivered" - : controllerClosed - ? "client_disconnected" - : "ended_without_terminal", - "copilot.resume.event_count": totalEventsFlushed, - "copilot.resume.poll_iterations": pollIterations, - "copilot.resume.duration_ms": Date.now() - startTime, - }); - rootSpan.end(); + + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)) + } + if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) { + emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { + message: 'The stream recovery timed out before completion.', + code: 'resume_timeout', + reason: 'timeout', + }) + } + } catch (error) { + if (!controllerClosed && !request.signal.aborted) { + logger.warn('Stream replay failed', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, { + message: 'The stream replay failed before completion.', + code: 'resume_internal', + reason: 'stream_replay_failed', + }) } + rootSpan.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }) + rootSpan.recordException(error instanceof Error ? error : new Error(String(error))) + } finally { + request.signal.removeEventListener('abort', abortListener) + closeController() + rootSpan.setAttributes({ + 'copilot.resume.outcome': sawTerminalEvent + ? 'terminal_delivered' + : controllerClosed + ? 'client_disconnected' + : 'ended_without_terminal', + 'copilot.resume.event_count': totalEventsFlushed, + 'copilot.resume.poll_iterations': pollIterations, + 'copilot.resume.duration_ms': Date.now() - startTime, + }) + rootSpan.end() + } } - return new Response(stream, { headers: SSE_RESPONSE_HEADERS }); + return new Response(stream, { headers: SSE_RESPONSE_HEADERS }) } diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 877d24f75c0..cf8c1d8e170 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -1,5 +1,5 @@ -import { Server } from "@modelcontextprotocol/sdk/server/index.js"; -import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; +import { Server } from '@modelcontextprotocol/sdk/server/index.js' +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js' import { CallToolRequestSchema, type CallToolResult, @@ -9,122 +9,110 @@ import { type ListToolsResult, McpError, type RequestId, -} from "@modelcontextprotocol/sdk/types.js"; -import { db } from "@sim/db"; -import { userStats } from "@sim/db/schema"; -import { createLogger } from "@sim/logger"; -import { eq, sql } from "drizzle-orm"; -import { type NextRequest, NextResponse } from 
"next/server"; -import { validateOAuthAccessToken } from "@/lib/auth/oauth-token"; -import { getHighestPrioritySubscription } from "@/lib/billing/core/subscription"; -import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; -import { - ORCHESTRATION_TIMEOUT_MS, - SIM_AGENT_API_URL, -} from "@/lib/copilot/constants"; -import { createRequestId } from "@/lib/copilot/request/http"; -import { runHeadlessCopilotLifecycle } from "@/lib/copilot/request/lifecycle/headless"; -import { orchestrateSubagentStream } from "@/lib/copilot/request/subagent"; -import { - ensureHandlersRegistered, - executeTool, -} from "@/lib/copilot/tool-executor"; -import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; -import { - DIRECT_TOOL_DEFS, - SUBAGENT_TOOL_DEFS, -} from "@/lib/copilot/tools/mcp/definitions"; -import { env } from "@/lib/core/config/env"; -import { RateLimiter } from "@/lib/core/rate-limiter"; -import { getBaseUrl } from "@/lib/core/utils/urls"; -import { generateId } from "@/lib/core/utils/uuid"; +} from '@modelcontextprotocol/sdk/types.js' +import { db } from '@sim/db' +import { userStats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { validateOAuthAccessToken } from '@/lib/auth/oauth-token' +import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' +import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' +import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { createRequestId } from '@/lib/copilot/request/http' +import { runHeadlessCopilotLifecycle } from '@/lib/copilot/request/lifecycle/headless' +import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent' +import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor' +import { prepareExecutionContext } from 
'@/lib/copilot/tools/handlers/context' +import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' +import { env } from '@/lib/core/config/env' +import { RateLimiter } from '@/lib/core/rate-limiter' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { generateId } from '@/lib/core/utils/uuid' import { authorizeWorkflowByWorkspacePermission, resolveWorkflowIdForUser, -} from "@/lib/workflows/utils"; +} from '@/lib/workflows/utils' -const logger = createLogger("CopilotMcpAPI"); -const mcpRateLimiter = new RateLimiter(); -const DEFAULT_COPILOT_MODEL = "claude-opus-4-6"; +const logger = createLogger('CopilotMcpAPI') +const mcpRateLimiter = new RateLimiter() +const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6' -export const dynamic = "force-dynamic"; -export const runtime = "nodejs"; -export const maxDuration = 3600; +export const dynamic = 'force-dynamic' +export const runtime = 'nodejs' +export const maxDuration = 3600 interface CopilotKeyAuthResult { - success: boolean; - userId?: string; - error?: string; + success: boolean + userId?: string + error?: string } /** * Validates a copilot API key by forwarding it to the Go copilot service's * `/api/validate-key` endpoint. Returns the associated userId on success. 
*/ -async function authenticateCopilotApiKey( - apiKey: string, -): Promise { +async function authenticateCopilotApiKey(apiKey: string): Promise { try { - const internalSecret = env.INTERNAL_API_SECRET; + const internalSecret = env.INTERNAL_API_SECRET if (!internalSecret) { - logger.error("INTERNAL_API_SECRET not configured"); - return { success: false, error: "Server configuration error" }; + logger.error('INTERNAL_API_SECRET not configured') + return { success: false, error: 'Server configuration error' } } - const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); + const { fetchGo } = await import('@/lib/copilot/request/go/fetch') const res = await fetchGo(`${SIM_AGENT_API_URL}/api/validate-key`, { - method: "POST", + method: 'POST', headers: { - "Content-Type": "application/json", - "x-api-key": internalSecret, + 'Content-Type': 'application/json', + 'x-api-key': internalSecret, }, body: JSON.stringify({ targetApiKey: apiKey }), signal: AbortSignal.timeout(10_000), - spanName: "sim → go /api/validate-key (mcp)", - operation: "mcp_validate_key", - }); + spanName: 'sim → go /api/validate-key (mcp)', + operation: 'mcp_validate_key', + }) if (!res.ok) { - const body = await res.json().catch(() => null); - const upstream = (body as Record)?.message; - const status = res.status; + const body = await res.json().catch(() => null) + const upstream = (body as Record)?.message + const status = res.status if (status === 401 || status === 403) { return { success: false, error: `Invalid Copilot API key. Generate a new key in Settings → Copilot and set it in the x-api-key header.`, - }; + } } if (status === 402) { return { success: false, error: `Usage limit exceeded for this Copilot API key. Upgrade your plan or wait for your quota to reset.`, - }; + } } return { success: false, - error: String(upstream ?? "Copilot API key validation failed"), - }; + error: String(upstream ?? 
'Copilot API key validation failed'), + } } - const data = (await res.json()) as { ok?: boolean; userId?: string }; + const data = (await res.json()) as { ok?: boolean; userId?: string } if (!data.ok || !data.userId) { return { success: false, - error: - "Invalid Copilot API key. Generate a new key in Settings → Copilot.", - }; + error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.', + } } - return { success: true, userId: data.userId }; + return { success: true, userId: data.userId } } catch (error) { - logger.error("Copilot API key validation failed", { error }); + logger.error('Copilot API key validation failed', { error }) return { success: false, error: - "Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.", - }; + 'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.', + } } } @@ -166,252 +154,232 @@ When the user refers to a workflow by name or description ("the email one", "my - Tools that operate on a specific workflow such as \`sim_workflow\`, \`sim_test\`, \`sim_deploy\`, and workflow-scoped \`sim_info\` requests require \`workflowId\`. - If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end. - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. 
-`; +` -type HeaderMap = Record; +type HeaderMap = Record -function createError( - id: RequestId, - code: ErrorCode | number, - message: string, -): JSONRPCError { +function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { return { - jsonrpc: "2.0", + jsonrpc: '2.0', id, error: { code, message }, - }; + } } function normalizeRequestHeaders(request: NextRequest): HeaderMap { - const headers: HeaderMap = {}; + const headers: HeaderMap = {} request.headers.forEach((value, key) => { - headers[key.toLowerCase()] = value; - }); + headers[key.toLowerCase()] = value + }) - return headers; + return headers } -function readHeader( - headers: HeaderMap | undefined, - name: string, -): string | undefined { - if (!headers) return undefined; - const value = headers[name.toLowerCase()]; +function readHeader(headers: HeaderMap | undefined, name: string): string | undefined { + if (!headers) return undefined + const value = headers[name.toLowerCase()] if (Array.isArray(value)) { - return value[0]; + return value[0] } - return value; + return value } class NextResponseCapture { - private _status = 200; - private _headers = new Headers(); - private _controller: ReadableStreamDefaultController | null = - null; - private _pendingChunks: Uint8Array[] = []; - private _closeHandlers: Array<() => void> = []; - private _errorHandlers: Array<(error: Error) => void> = []; - private _headersWritten = false; - private _ended = false; - private _headersPromise: Promise; - private _resolveHeaders: (() => void) | null = null; - private _endedPromise: Promise; - private _resolveEnded: (() => void) | null = null; - readonly readable: ReadableStream; + private _status = 200 + private _headers = new Headers() + private _controller: ReadableStreamDefaultController | null = null + private _pendingChunks: Uint8Array[] = [] + private _closeHandlers: Array<() => void> = [] + private _errorHandlers: Array<(error: Error) => void> = [] + private _headersWritten = false + 
private _ended = false + private _headersPromise: Promise + private _resolveHeaders: (() => void) | null = null + private _endedPromise: Promise + private _resolveEnded: (() => void) | null = null + readonly readable: ReadableStream constructor() { this._headersPromise = new Promise((resolve) => { - this._resolveHeaders = resolve; - }); + this._resolveHeaders = resolve + }) this._endedPromise = new Promise((resolve) => { - this._resolveEnded = resolve; - }); + this._resolveEnded = resolve + }) this.readable = new ReadableStream({ start: (controller) => { - this._controller = controller; + this._controller = controller if (this._pendingChunks.length > 0) { for (const chunk of this._pendingChunks) { - controller.enqueue(chunk); + controller.enqueue(chunk) } - this._pendingChunks = []; + this._pendingChunks = [] } }, cancel: () => { - this._ended = true; - this._resolveEnded?.(); - this.triggerCloseHandlers(); + this._ended = true + this._resolveEnded?.() + this.triggerCloseHandlers() }, - }); + }) } private markHeadersWritten(): void { - if (this._headersWritten) return; - this._headersWritten = true; - this._resolveHeaders?.(); + if (this._headersWritten) return + this._headersWritten = true + this._resolveHeaders?.() } private triggerCloseHandlers(): void { for (const handler of this._closeHandlers) { try { - handler(); + handler() } catch (error) { - this.triggerErrorHandlers( - error instanceof Error ? error : new Error(String(error)), - ); + this.triggerErrorHandlers(error instanceof Error ? 
error : new Error(String(error))) } } } private triggerErrorHandlers(error: Error): void { for (const errorHandler of this._errorHandlers) { - errorHandler(error); + errorHandler(error) } } private normalizeChunk(chunk: unknown): Uint8Array | null { - if (typeof chunk === "string") { - return new TextEncoder().encode(chunk); + if (typeof chunk === 'string') { + return new TextEncoder().encode(chunk) } if (chunk instanceof Uint8Array) { - return chunk; + return chunk } if (chunk === undefined || chunk === null) { - return null; + return null } - return new TextEncoder().encode(String(chunk)); + return new TextEncoder().encode(String(chunk)) } - writeHead( - status: number, - headers?: Record, - ): this { - this._status = status; + writeHead(status: number, headers?: Record): this { + this._status = status if (headers) { Object.entries(headers).forEach(([key, value]) => { if (Array.isArray(value)) { - this._headers.set(key, value.join(", ")); + this._headers.set(key, value.join(', ')) } else { - this._headers.set(key, String(value)); + this._headers.set(key, String(value)) } - }); + }) } - this.markHeadersWritten(); - return this; + this.markHeadersWritten() + return this } flushHeaders(): this { - this.markHeadersWritten(); - return this; + this.markHeadersWritten() + return this } write(chunk: unknown): boolean { - const normalized = this.normalizeChunk(chunk); - if (!normalized) return true; + const normalized = this.normalizeChunk(chunk) + if (!normalized) return true - this.markHeadersWritten(); + this.markHeadersWritten() if (this._controller) { try { - this._controller.enqueue(normalized); + this._controller.enqueue(normalized) } catch (error) { - this.triggerErrorHandlers( - error instanceof Error ? error : new Error(String(error)), - ); + this.triggerErrorHandlers(error instanceof Error ? 
error : new Error(String(error))) } } else { - this._pendingChunks.push(normalized); + this._pendingChunks.push(normalized) } - return true; + return true } end(chunk?: unknown): this { - if (chunk !== undefined) this.write(chunk); - this.markHeadersWritten(); - if (this._ended) return this; + if (chunk !== undefined) this.write(chunk) + this.markHeadersWritten() + if (this._ended) return this - this._ended = true; - this._resolveEnded?.(); + this._ended = true + this._resolveEnded?.() if (this._controller) { try { - this._controller.close(); + this._controller.close() } catch (error) { - this.triggerErrorHandlers( - error instanceof Error ? error : new Error(String(error)), - ); + this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) } } - this.triggerCloseHandlers(); + this.triggerCloseHandlers() - return this; + return this } async waitForHeaders(timeoutMs = 30000): Promise { - if (this._headersWritten) return; + if (this._headersWritten) return await Promise.race([ this._headersPromise, new Promise((resolve) => { - setTimeout(resolve, timeoutMs); + setTimeout(resolve, timeoutMs) }), - ]); + ]) } async waitForEnd(timeoutMs = 30000): Promise { - if (this._ended) return; + if (this._ended) return await Promise.race([ this._endedPromise, new Promise((resolve) => { - setTimeout(resolve, timeoutMs); + setTimeout(resolve, timeoutMs) }), - ]); + ]) } - on( - event: "close" | "error", - handler: (() => void) | ((error: Error) => void), - ): this { - if (event === "close") { - this._closeHandlers.push(handler as () => void); + on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this { + if (event === 'close') { + this._closeHandlers.push(handler as () => void) } - if (event === "error") { - this._errorHandlers.push(handler as (error: Error) => void); + if (event === 'error') { + this._errorHandlers.push(handler as (error: Error) => void) } - return this; + return this } toNextResponse(): NextResponse { return new 
NextResponse(this.readable, { status: this._status, headers: this._headers, - }); + }) } } function buildMcpServer(abortSignal?: AbortSignal): Server { const server = new Server( { - name: "sim-copilot", - version: "1.0.0", + name: 'sim-copilot', + version: '1.0.0', }, { capabilities: { tools: {} }, instructions: MCP_SERVER_INSTRUCTIONS, - }, - ); + } + ) server.setRequestHandler(ListToolsRequestSchema, async () => { const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ @@ -419,100 +387,100 @@ function buildMcpServer(abortSignal?: AbortSignal): Server { description: tool.description, inputSchema: tool.inputSchema, ...(tool.annotations && { annotations: tool.annotations }), - })); + })) const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ name: tool.name, description: tool.description, inputSchema: tool.inputSchema, ...(tool.annotations && { annotations: tool.annotations }), - })); + })) const result: ListToolsResult = { tools: [...directTools, ...subagentTools], - }; + } - return result; - }); + return result + }) server.setRequestHandler(CallToolRequestSchema, async (request, extra) => { - const headers = (extra.requestInfo?.headers || {}) as HeaderMap; - const apiKeyHeader = readHeader(headers, "x-api-key"); - const authorizationHeader = readHeader(headers, "authorization"); + const headers = (extra.requestInfo?.headers || {}) as HeaderMap + const apiKeyHeader = readHeader(headers, 'x-api-key') + const authorizationHeader = readHeader(headers, 'authorization') - let authResult: CopilotKeyAuthResult = { success: false }; + let authResult: CopilotKeyAuthResult = { success: false } - if (authorizationHeader?.startsWith("Bearer ")) { - const token = authorizationHeader.slice(7); - const oauthResult = await validateOAuthAccessToken(token); + if (authorizationHeader?.startsWith('Bearer ')) { + const token = authorizationHeader.slice(7) + const oauthResult = await validateOAuthAccessToken(token) if (oauthResult.success && oauthResult.userId) { - if 
(!oauthResult.scopes?.includes("mcp:tools")) { + if (!oauthResult.scopes?.includes('mcp:tools')) { return { content: [ { - type: "text" as const, + type: 'text' as const, text: 'AUTHENTICATION ERROR: OAuth token is missing the required "mcp:tools" scope. Re-authorize with the correct scopes.', }, ], isError: true, - }; + } } - authResult = { success: true, userId: oauthResult.userId }; + authResult = { success: true, userId: oauthResult.userId } } else { return { content: [ { - type: "text" as const, - text: `AUTHENTICATION ERROR: ${oauthResult.error ?? "Invalid OAuth access token"} Do NOT retry — re-authorize via OAuth.`, + type: 'text' as const, + text: `AUTHENTICATION ERROR: ${oauthResult.error ?? 'Invalid OAuth access token'} Do NOT retry — re-authorize via OAuth.`, }, ], isError: true, - }; + } } } else if (apiKeyHeader) { - authResult = await authenticateCopilotApiKey(apiKeyHeader); + authResult = await authenticateCopilotApiKey(apiKeyHeader) } if (!authResult.success || !authResult.userId) { const errorMsg = apiKeyHeader ? `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.` - : "AUTHENTICATION ERROR: No authentication provided. Provide a Bearer token (OAuth 2.1) or an x-api-key header. Generate a Copilot API key in Settings → Copilot."; - logger.warn("MCP copilot auth failed", { method: request.method }); + : 'AUTHENTICATION ERROR: No authentication provided. Provide a Bearer token (OAuth 2.1) or an x-api-key header. Generate a Copilot API key in Settings → Copilot.' 
+ logger.warn('MCP copilot auth failed', { method: request.method }) return { content: [ { - type: "text" as const, + type: 'text' as const, text: errorMsg, }, ], isError: true, - }; + } } const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription( authResult.userId, await getHighestPrioritySubscription(authResult.userId), - "api-endpoint", - false, - ); + 'api-endpoint', + false + ) if (!rateLimitResult.allowed) { return { content: [ { - type: "text" as const, + type: 'text' as const, text: `RATE LIMIT: Too many requests. Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`, }, ], isError: true, - }; + } } const params = request.params as | { name?: string; arguments?: Record } - | undefined; + | undefined if (!params?.name) { - throw new McpError(ErrorCode.InvalidParams, "Tool name required"); + throw new McpError(ErrorCode.InvalidParams, 'Tool name required') } const result = await handleToolsCall( @@ -521,50 +489,46 @@ function buildMcpServer(abortSignal?: AbortSignal): Server { arguments: params.arguments, }, authResult.userId, - abortSignal, - ); + abortSignal + ) - trackMcpCopilotCall(authResult.userId); + trackMcpCopilotCall(authResult.userId) - return result; - }); + return result + }) - return server; + return server } async function handleMcpRequestWithSdk( request: NextRequest, - parsedBody: unknown, + parsedBody: unknown ): Promise { - const server = buildMcpServer(request.signal); + const server = buildMcpServer(request.signal) const transport = new StreamableHTTPServerTransport({ sessionIdGenerator: undefined, enableJsonResponse: true, - }); + }) - const responseCapture = new NextResponseCapture(); + const responseCapture = new NextResponseCapture() const requestAdapter = { method: request.method, headers: normalizeRequestHeaders(request), - }; + } - await server.connect(transport); + await server.connect(transport) try { - await transport.handleRequest( - requestAdapter as any, - responseCapture as any, - 
parsedBody, - ); - await responseCapture.waitForHeaders(); + await transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody) + await responseCapture.waitForHeaders() // Must exceed the longest possible tool execution. // Using ORCHESTRATION_TIMEOUT_MS + 60 s buffer so the orchestrator can // finish or time-out on its own before the transport is torn down. - await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000); - return responseCapture.toNextResponse(); + await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000) + return responseCapture.toNextResponse() } finally { - await server.close().catch(() => {}); - await transport.close().catch(() => {}); + await server.close().catch(() => {}) + await transport.close().catch(() => {}) } } @@ -572,48 +536,41 @@ export async function GET() { // Return 405 to signal that server-initiated SSE notifications are not // supported. Without this, clients like mcp-remote will repeatedly // reconnect trying to open an SSE stream, flooding the logs with GETs. 
- return new NextResponse(null, { status: 405 }); + return new NextResponse(null, { status: 405 }) } export async function POST(request: NextRequest) { - const hasAuth = - request.headers.has("authorization") || request.headers.has("x-api-key"); + const hasAuth = request.headers.has('authorization') || request.headers.has('x-api-key') if (!hasAuth) { - const origin = getBaseUrl().replace(/\/$/, ""); - const resourceMetadataUrl = `${origin}/.well-known/oauth-protected-resource/api/mcp/copilot`; - return new NextResponse(JSON.stringify({ error: "unauthorized" }), { + const origin = getBaseUrl().replace(/\/$/, '') + const resourceMetadataUrl = `${origin}/.well-known/oauth-protected-resource/api/mcp/copilot` + return new NextResponse(JSON.stringify({ error: 'unauthorized' }), { status: 401, headers: { - "WWW-Authenticate": `Bearer resource_metadata="${resourceMetadataUrl}", scope="mcp:tools"`, - "Content-Type": "application/json", + 'WWW-Authenticate': `Bearer resource_metadata="${resourceMetadataUrl}", scope="mcp:tools"`, + 'Content-Type': 'application/json', }, - }); + }) } try { - let parsedBody: unknown; + let parsedBody: unknown try { - parsedBody = await request.json(); + parsedBody = await request.json() } catch { - return NextResponse.json( - createError(0, ErrorCode.ParseError, "Invalid JSON body"), - { - status: 400, - }, - ); + return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), { + status: 400, + }) } - return await handleMcpRequestWithSdk(request, parsedBody); + return await handleMcpRequestWithSdk(request, parsedBody) } catch (error) { - logger.error("Error handling MCP request", { error }); - return NextResponse.json( - createError(0, ErrorCode.InternalError, "Internal error"), - { - status: 500, - }, - ); + logger.error('Error handling MCP request', { error }) + return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { + status: 500, + }) } } @@ -621,20 +578,20 @@ export async function 
OPTIONS() { return new NextResponse(null, { status: 204, headers: { - "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Methods": "GET, POST, OPTIONS, DELETE", - "Access-Control-Allow-Headers": - "Content-Type, Authorization, X-API-Key, X-Requested-With, Accept", - "Access-Control-Max-Age": "86400", + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS, DELETE', + 'Access-Control-Allow-Headers': + 'Content-Type, Authorization, X-API-Key, X-Requested-With, Accept', + 'Access-Control-Max-Age': '86400', }, - }); + }) } export async function DELETE(request: NextRequest) { - void request; - return NextResponse.json(createError(0, -32000, "Method not allowed."), { + void request + return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405, - }); + }) } /** @@ -649,82 +606,73 @@ function trackMcpCopilotCall(userId: string): void { .where(eq(userStats.userId, userId)) .then(() => {}) .catch((error) => { - logger.error("Failed to track MCP copilot call", { error, userId }); - }); + logger.error('Failed to track MCP copilot call', { error, userId }) + }) } async function handleToolsCall( params: { name: string; arguments?: Record }, userId: string, - abortSignal?: AbortSignal, + abortSignal?: AbortSignal ): Promise { - const args = params.arguments || {}; + const args = params.arguments || {} - const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name); + const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name) if (directTool) { - return handleDirectToolCall(directTool, args, userId); + return handleDirectToolCall(directTool, args, userId) } - const subagentTool = SUBAGENT_TOOL_DEFS.find( - (tool) => tool.name === params.name, - ); + const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) if (subagentTool) { - return handleSubagentToolCall(subagentTool, args, userId, abortSignal); + return handleSubagentToolCall(subagentTool, args, 
userId, abortSignal) } - throw new McpError( - ErrorCode.MethodNotFound, - `Tool not found: ${params.name}`, - ); + throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`) } async function handleDirectToolCall( toolDef: (typeof DIRECT_TOOL_DEFS)[number], args: Record, - userId: string, + userId: string ): Promise { try { const execContext = await prepareExecutionContext( userId, - (args.workflowId as string) || "", - (args.chatId as string) || undefined, - ); + (args.workflowId as string) || '', + (args.chatId as string) || undefined + ) const toolCall = { id: generateId(), name: toolDef.toolId, - status: "pending" as const, + status: 'pending' as const, params: args as Record, startTime: Date.now(), - }; + } - ensureHandlersRegistered(); - const result = await executeTool( - toolCall.name, - toolCall.params || {}, - execContext, - ); + ensureHandlersRegistered() + const result = await executeTool(toolCall.name, toolCall.params || {}, execContext) return { content: [ { - type: "text", + type: 'text', text: JSON.stringify(result.output ?? result, null, 2), }, ], isError: !result.success, - }; + } } catch (error) { - logger.error("Direct tool execution failed", { tool: toolDef.name, error }); + logger.error('Direct tool execution failed', { tool: toolDef.name, error }) return { content: [ { - type: "text", + type: 'text', text: `Tool execution failed: ${error instanceof Error ? 
error.message : String(error)}`, }, ], isError: true, - }; + } } } @@ -737,89 +685,76 @@ async function handleDirectToolCall( async function handleBuildToolCall( args: Record, userId: string, - abortSignal?: AbortSignal, + abortSignal?: AbortSignal ): Promise { try { - const requestText = (args.request as string) || JSON.stringify(args); - const workflowId = args.workflowId as string | undefined; - let resolvedWorkflowName: string | undefined; - let resolvedWorkspaceId: string | undefined; + const requestText = (args.request as string) || JSON.stringify(args) + const workflowId = args.workflowId as string | undefined + let resolvedWorkflowName: string | undefined + let resolvedWorkspaceId: string | undefined const resolved = workflowId ? await (async () => { const authorization = await authorizeWorkflowByWorkspacePermission({ workflowId, userId, - action: "read", - }); - resolvedWorkflowName = authorization.workflow?.name || undefined; - resolvedWorkspaceId = - authorization.workflow?.workspaceId || undefined; + action: 'read', + }) + resolvedWorkflowName = authorization.workflow?.name || undefined + resolvedWorkspaceId = authorization.workflow?.workspaceId || undefined return authorization.allowed ? { - status: "resolved" as const, + status: 'resolved' as const, workflowId, workflowName: resolvedWorkflowName, } : { - status: "not_found" as const, - message: - "workflowId is required for build. Call create_workflow first.", - }; + status: 'not_found' as const, + message: 'workflowId is required for build. 
Call create_workflow first.', + } })() - : await resolveWorkflowIdForUser(userId); + : await resolveWorkflowIdForUser(userId) - if (resolved.status === "resolved") { - resolvedWorkflowName ||= resolved.workflowName; + if (resolved.status === 'resolved') { + resolvedWorkflowName ||= resolved.workflowName } - if (!resolved || resolved.status !== "resolved") { + if (!resolved || resolved.status !== 'resolved') { return { content: [ { - type: "text", + type: 'text', text: JSON.stringify( { success: false, error: resolved?.message ?? - "workflowId is required for build. Call create_workflow first.", + 'workflowId is required for build. Call create_workflow first.', }, null, - 2, + 2 ), }, ], isError: true, - }; + } } - const chatId = generateId(); - const executionContext = await prepareExecutionContext( - userId, - resolved.workflowId, - chatId, - { - workspaceId: resolvedWorkspaceId, - }, - ); - resolvedWorkspaceId = executionContext.workspaceId; - let workspaceContext: string | undefined; + const chatId = generateId() + const executionContext = await prepareExecutionContext(userId, resolved.workflowId, chatId, { + workspaceId: resolvedWorkspaceId, + }) + resolvedWorkspaceId = executionContext.workspaceId + let workspaceContext: string | undefined if (resolvedWorkspaceId) { try { - workspaceContext = await generateWorkspaceContext( - resolvedWorkspaceId, - userId, - ); + workspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) } catch (error) { - logger.warn( - "Failed to generate workspace context for build tool call", - { - workflowId: resolved.workflowId, - workspaceId: resolvedWorkspaceId, - error: error instanceof Error ? error.message : String(error), - }, - ); + logger.warn('Failed to generate workspace context for build tool call', { + workflowId: resolved.workflowId, + workspaceId: resolvedWorkspaceId, + error: error instanceof Error ? 
error.message : String(error), + }) } } @@ -831,47 +766,47 @@ async function handleBuildToolCall( ...(workspaceContext ? { workspaceContext } : {}), userId, model: DEFAULT_COPILOT_MODEL, - mode: "agent", - commands: ["fast"], + mode: 'agent', + commands: ['fast'], messageId: generateId(), chatId, - }; + } const result = await runHeadlessCopilotLifecycle(requestPayload, { userId, workflowId: resolved.workflowId, workspaceId: resolvedWorkspaceId, chatId, - goRoute: "/api/mcp", + goRoute: '/api/mcp', executionContext, autoExecuteTools: true, timeout: ORCHESTRATION_TIMEOUT_MS, interactive: false, abortSignal, - }); + }) const responseData = { success: result.success, content: result.content, toolCalls: result.toolCalls, error: result.error, - }; + } return { - content: [{ type: "text", text: JSON.stringify(responseData, null, 2) }], + content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }], isError: !result.success, - }; + } } catch (error) { - logger.error("Build tool call failed", { error }); + logger.error('Build tool call failed', { error }) return { content: [ { - type: "text", + type: 'text', text: `Build failed: ${error instanceof Error ? 
error.message : String(error)}`, }, ], isError: true, - }; + } } } @@ -879,10 +814,10 @@ async function handleSubagentToolCall( toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], args: Record, userId: string, - abortSignal?: AbortSignal, + abortSignal?: AbortSignal ): Promise { - if (toolDef.agentId === "workflow") { - return handleBuildToolCall(args, userId, abortSignal); + if (toolDef.agentId === 'workflow') { + return handleBuildToolCall(args, userId, abortSignal) } try { @@ -890,12 +825,12 @@ async function handleSubagentToolCall( (args.request as string) || (args.message as string) || (args.error as string) || - JSON.stringify(args); - const simRequestId = createRequestId(); + JSON.stringify(args) + const simRequestId = createRequestId() - const context = (args.context as Record) || {}; + const context = (args.context as Record) || {} if (args.plan && !context.plan) { - context.plan = args.plan; + context.plan = args.plan } const result = await orchestrateSubagentStream( @@ -907,7 +842,7 @@ async function handleSubagentToolCall( context, model: DEFAULT_COPILOT_MODEL, headless: true, - source: "mcp", + source: 'mcp', }, { userId, @@ -915,10 +850,10 @@ async function handleSubagentToolCall( workspaceId: args.workspaceId as string | undefined, simRequestId, abortSignal, - }, - ); + } + ) - let responseData: unknown; + let responseData: unknown if (result.structuredResult) { responseData = { @@ -926,44 +861,44 @@ async function handleSubagentToolCall( type: result.structuredResult.type, summary: result.structuredResult.summary, data: result.structuredResult.data, - }; + } } else if (result.error) { responseData = { success: false, error: result.error, errors: result.errors, - }; + } } else { responseData = { success: result.success, content: result.content, - }; + } } return { content: [ { - type: "text", + type: 'text', text: JSON.stringify(responseData, null, 2), }, ], isError: !result.success, - }; + } } catch (error) { - logger.error("Subagent tool call failed", { 
+ logger.error('Subagent tool call failed', { tool: toolDef.name, agentId: toolDef.agentId, error, - }); + }) return { content: [ { - type: "text", + type: 'text', text: `Subagent call failed: ${error instanceof Error ? error.message : String(error)}`, }, ], isError: true, - }; + } } } diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index 936aaaac46c..f3a847a1448 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -16,7 +16,7 @@ */ import type { Attributes, Context, Link, SpanKind } from '@opentelemetry/api' -import { DiagConsoleLogger, DiagLogLevel, TraceFlags, diag, trace } from '@opentelemetry/api' +import { DiagConsoleLogger, DiagLogLevel, diag, TraceFlags, trace } from '@opentelemetry/api' import type { ReadableSpan, Sampler, @@ -60,13 +60,7 @@ const DEFAULT_TELEMETRY_CONFIG = { * explicitly creates; adding a broad prefix (e.g. `http.`) risks * silently re-enabling non-copilot tracing. */ -const ALLOWED_SPAN_PREFIXES = [ - 'gen_ai.', - 'copilot.', - 'sim →', - 'sim.', - 'tool.execute', -] +const ALLOWED_SPAN_PREFIXES = ['gen_ai.', 'copilot.', 'sim →', 'sim.', 'tool.execute'] function isBusinessSpan(spanName: string): boolean { return ALLOWED_SPAN_PREFIXES.some((prefix) => spanName.startsWith(prefix)) diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index 6c572e14344..1b97d061204 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -32,16 +32,14 @@ async function withDbSpan( op: string, table: string, attrs: Record, - fn: () => Promise, + fn: () => Promise ): Promise { const span = getAsyncRunsTracer().startSpan(name, { attributes: { 'db.system': 'postgresql', 'db.operation': op, 'db.sql.table': table, - ...Object.fromEntries( - Object.entries(attrs).filter(([, v]) => v !== undefined), - ), + ...Object.fromEntries(Object.entries(attrs).filter(([, v]) => v !== undefined)), }, 
}) try { @@ -111,7 +109,7 @@ export async function createRunSegment(input: CreateRunSegmentInput) { }) .returning() return run - }, + } ) } @@ -147,7 +145,7 @@ export async function updateRunStatus( .where(eq(copilotRuns.id, runId)) .returning() return run ?? null - }, + } ) } @@ -165,7 +163,7 @@ export async function getLatestRunForExecution(executionId: string) { .orderBy(desc(copilotRuns.startedAt)) .limit(1) return run ?? null - }, + } ) } @@ -189,7 +187,7 @@ export async function getLatestRunForStream(streamId: string, userId?: string) { .orderBy(desc(copilotRuns.startedAt)) .limit(1) return run ?? null - }, + } ) } @@ -202,7 +200,7 @@ export async function getRunSegment(runId: string) { async () => { const [run] = await db.select().from(copilotRuns).where(eq(copilotRuns.id, runId)).limit(1) return run ?? null - }, + } ) } @@ -234,7 +232,7 @@ export async function createRunCheckpoint(input: { .returning() return checkpoint - }, + } ) } @@ -308,7 +306,7 @@ export async function upsertAsyncToolCall(input: { .returning() return row - }, + } ) } @@ -325,7 +323,7 @@ export async function getAsyncToolCall(toolCallId: string) { .where(eq(copilotAsyncToolCalls.toolCallId, toolCallId)) .limit(1) return row ?? null - }, + } ) } @@ -373,7 +371,7 @@ export async function markAsyncToolStatus( .returning() return row ?? 
null - }, + } ) } @@ -428,7 +426,7 @@ export async function listAsyncToolCallsForRun(runId: string) { .select() .from(copilotAsyncToolCalls) .where(eq(copilotAsyncToolCalls.runId, runId)) - .orderBy(desc(copilotAsyncToolCalls.createdAt)), + .orderBy(desc(copilotAsyncToolCalls.createdAt)) ) } @@ -443,7 +441,7 @@ export async function getAsyncToolCalls(toolCallIds: string[]) { db .select() .from(copilotAsyncToolCalls) - .where(inArray(copilotAsyncToolCalls.toolCallId, toolCallIds)), + .where(inArray(copilotAsyncToolCalls.toolCallId, toolCallIds)) ) } @@ -473,7 +471,7 @@ export async function claimCompletedAsyncToolCall(toolCallId: string, workerId: ) .returning() return row ?? null - }, + } ) } @@ -503,6 +501,6 @@ export async function releaseCompletedAsyncToolClaim(toolCallId: string, workerI ) .returning() return row ?? null - }, + } ) } diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index ca1aec8d7c5..543c5b8f8d2 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -1,68 +1,50 @@ -import { db } from "@sim/db"; -import { copilotChats } from "@sim/db/schema"; -import { createLogger } from "@sim/logger"; -import { eq, sql } from "drizzle-orm"; -import { type NextRequest, NextResponse } from "next/server"; -import { z } from "zod"; -import { getSession } from "@/lib/auth"; -import { - type ChatLoadResult, - resolveOrCreateChat, -} from "@/lib/copilot/chat/lifecycle"; -import { buildCopilotRequestPayload } from "@/lib/copilot/chat/payload"; +import { type Context as OtelContext, context as otelContextApi } from '@opentelemetry/api' +import { db } from '@sim/db' +import { copilotChats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { getSession } from '@/lib/auth' +import { type ChatLoadResult, resolveOrCreateChat } from 
'@/lib/copilot/chat/lifecycle' +import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload' import { buildPersistedAssistantMessage, buildPersistedUserMessage, -} from "@/lib/copilot/chat/persisted-message"; +} from '@/lib/copilot/chat/persisted-message' import { processContextsServer, resolveActiveResourceContext, -} from "@/lib/copilot/chat/process-contents"; -import { - context as otelContextApi, - type Context as OtelContext, -} from "@opentelemetry/api"; -import { finalizeAssistantTurn } from "@/lib/copilot/chat/terminal-state"; -import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; -import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; -import { - startCopilotOtelRoot, - withCopilotSpan, -} from "@/lib/copilot/request/otel"; -import { COPILOT_REQUEST_MODES } from "@/lib/copilot/constants"; +} from '@/lib/copilot/chat/process-contents' +import { finalizeAssistantTurn } from '@/lib/copilot/chat/terminal-state' +import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' +import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { createBadRequestResponse, createRequestTracker, createUnauthorizedResponse, -} from "@/lib/copilot/request/http"; -import { - createSSEStream, - SSE_RESPONSE_HEADERS, -} from "@/lib/copilot/request/lifecycle/start"; +} from '@/lib/copilot/request/http' +import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start' +import { startCopilotOtelRoot, withCopilotSpan } from '@/lib/copilot/request/otel' import { acquirePendingChatStream, getPendingChatStreamId, releasePendingChatStream, -} from "@/lib/copilot/request/session"; -import type { - ExecutionContext, - OrchestratorResult, -} from "@/lib/copilot/request/types"; -import { persistChatResources } from "@/lib/copilot/resources/persistence"; -import { taskPubSub } from "@/lib/copilot/tasks"; -import { 
prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; -import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; -import { - getWorkflowById, - resolveWorkflowIdForUser, -} from "@/lib/workflows/utils"; -import { getUserEntityPermissions } from "@/lib/workspaces/permissions/utils"; -import type { ChatContext } from "@/stores/panel"; +} from '@/lib/copilot/request/session' +import type { ExecutionContext, OrchestratorResult } from '@/lib/copilot/request/types' +import { persistChatResources } from '@/lib/copilot/resources/persistence' +import { taskPubSub } from '@/lib/copilot/tasks' +import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils' +import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils' +import type { ChatContext } from '@/stores/panel' -export const maxDuration = 3600; +export const maxDuration = 3600 -const logger = createLogger("UnifiedChatAPI"); -const DEFAULT_MODEL = "claude-opus-4-6"; +const logger = createLogger('UnifiedChatAPI') +const DEFAULT_MODEL = 'claude-opus-4-6' const FileAttachmentSchema = z.object({ id: z.string(), @@ -70,52 +52,40 @@ const FileAttachmentSchema = z.object({ filename: z.string(), media_type: z.string(), size: z.number(), -}); +}) const ResourceAttachmentSchema = z.object({ - type: z.enum([ - "workflow", - "table", - "file", - "knowledgebase", - "folder", - "task", - "log", - "generic", - ]), + type: z.enum(['workflow', 'table', 'file', 'knowledgebase', 'folder', 'task', 'log', 'generic']), id: z.string().min(1), title: z.string().optional(), active: z.boolean().optional(), -}); - -const GENERIC_RESOURCE_TITLE: Record< - z.infer["type"], - string -> = { - workflow: "Workflow", - table: "Table", - file: "File", - knowledgebase: "Knowledge Base", - folder: "Folder", - task: "Task", - log: "Log", - generic: 
"Resource", -}; +}) + +const GENERIC_RESOURCE_TITLE: Record['type'], string> = { + workflow: 'Workflow', + table: 'Table', + file: 'File', + knowledgebase: 'Knowledge Base', + folder: 'Folder', + task: 'Task', + log: 'Log', + generic: 'Resource', +} const ChatContextSchema = z.object({ kind: z.enum([ - "past_chat", - "workflow", - "current_workflow", - "blocks", - "logs", - "workflow_block", - "knowledge", - "templates", - "docs", - "table", - "file", - "folder", + 'past_chat', + 'workflow', + 'current_workflow', + 'blocks', + 'logs', + 'workflow_block', + 'knowledge', + 'templates', + 'docs', + 'table', + 'file', + 'folder', ]), label: z.string(), chatId: z.string().optional(), @@ -128,17 +98,17 @@ const ChatContextSchema = z.object({ tableId: z.string().optional(), fileId: z.string().optional(), folderId: z.string().optional(), -}); +}) const ChatMessageSchema = z.object({ - message: z.string().min(1, "Message is required"), + message: z.string().min(1, 'Message is required'), userMessageId: z.string().optional(), chatId: z.string().optional(), workflowId: z.string().optional(), workspaceId: z.string().optional(), workflowName: z.string().optional(), model: z.string().optional().default(DEFAULT_MODEL), - mode: z.enum(COPILOT_REQUEST_MODES).optional().default("agent"), + mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), prefetch: z.boolean().optional(), createNewChat: z.boolean().optional().default(false), implicitFeedback: z.string().optional(), @@ -148,106 +118,98 @@ const ChatMessageSchema = z.object({ contexts: z.array(ChatContextSchema).optional(), commands: z.array(z.string()).optional(), userTimezone: z.string().optional(), -}); +}) -type UnifiedChatRequest = z.infer; +type UnifiedChatRequest = z.infer type UnifiedChatBranch = | { - kind: "workflow"; - workflowId: string; - workflowName?: string; - workspaceId?: string; - selectedModel: string; - mode: UnifiedChatRequest["mode"]; - provider?: string; - goRoute: "/api/copilot"; - titleModel: 
string; - titleProvider?: string; - notifyWorkspaceStatus: false; + kind: 'workflow' + workflowId: string + workflowName?: string + workspaceId?: string + selectedModel: string + mode: UnifiedChatRequest['mode'] + provider?: string + goRoute: '/api/copilot' + titleModel: string + titleProvider?: string + notifyWorkspaceStatus: false buildPayload: (params: { - message: string; - userId: string; - userMessageId: string; - chatId?: string; - contexts: Array<{ type: string; content: string }>; - fileAttachments?: UnifiedChatRequest["fileAttachments"]; - userPermission?: string; - userTimezone?: string; - workflowId: string; - workflowName?: string; - workspaceId?: string; - mode: UnifiedChatRequest["mode"]; - provider?: string; - commands?: string[]; - prefetch?: boolean; - implicitFeedback?: string; - }) => Promise>; + message: string + userId: string + userMessageId: string + chatId?: string + contexts: Array<{ type: string; content: string }> + fileAttachments?: UnifiedChatRequest['fileAttachments'] + userPermission?: string + userTimezone?: string + workflowId: string + workflowName?: string + workspaceId?: string + mode: UnifiedChatRequest['mode'] + provider?: string + commands?: string[] + prefetch?: boolean + implicitFeedback?: string + }) => Promise> buildExecutionContext: (params: { - userId: string; - chatId?: string; - userTimezone?: string; - messageId: string; - }) => Promise; + userId: string + chatId?: string + userTimezone?: string + messageId: string + }) => Promise } | { - kind: "workspace"; - workspaceId: string; - goRoute: "/api/mothership"; - titleModel: string; - titleProvider?: undefined; - notifyWorkspaceStatus: true; + kind: 'workspace' + workspaceId: string + goRoute: '/api/mothership' + titleModel: string + titleProvider?: undefined + notifyWorkspaceStatus: true buildPayload: (params: { - message: string; - userId: string; - userMessageId: string; - chatId?: string; - contexts: Array<{ type: string; content: string }>; - fileAttachments?: 
UnifiedChatRequest["fileAttachments"]; - userPermission?: string; - userTimezone?: string; - workspaceContext?: string; - }) => Promise>; + message: string + userId: string + userMessageId: string + chatId?: string + contexts: Array<{ type: string; content: string }> + fileAttachments?: UnifiedChatRequest['fileAttachments'] + userPermission?: string + userTimezone?: string + workspaceContext?: string + }) => Promise> buildExecutionContext: (params: { - userId: string; - chatId?: string; - userTimezone?: string; - messageId: string; - }) => Promise; - }; - -function normalizeContexts(contexts: UnifiedChatRequest["contexts"]) { + userId: string + chatId?: string + userTimezone?: string + messageId: string + }) => Promise + } + +function normalizeContexts(contexts: UnifiedChatRequest['contexts']) { if (!Array.isArray(contexts)) { - return contexts; + return contexts } return contexts.map((ctx) => { - if (ctx.kind !== "blocks") return ctx; - if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx; - if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] }; - return ctx; - }); + if (ctx.kind !== 'blocks') return ctx + if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx + if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] } + return ctx + }) } async function resolveAgentContexts(params: { - contexts?: UnifiedChatRequest["contexts"]; - resourceAttachments?: UnifiedChatRequest["resourceAttachments"]; - userId: string; - message: string; - workspaceId?: string; - chatId?: string; - requestId: string; + contexts?: UnifiedChatRequest['contexts'] + resourceAttachments?: UnifiedChatRequest['resourceAttachments'] + userId: string + message: string + workspaceId?: string + chatId?: string + requestId: string }): Promise> { - const { - contexts, - resourceAttachments, - userId, - message, - workspaceId, - chatId, - requestId, - } = params; + const { contexts, resourceAttachments, userId, message, workspaceId, chatId, requestId } = params 
- let agentContexts: Array<{ type: string; content: string }> = []; + let agentContexts: Array<{ type: string; content: string }> = [] if (Array.isArray(contexts) && contexts.length > 0) { try { @@ -256,18 +218,14 @@ async function resolveAgentContexts(params: { userId, message, workspaceId, - chatId, - ); + chatId + ) } catch (error) { - logger.error(`[${requestId}] Failed to process contexts`, error); + logger.error(`[${requestId}] Failed to process contexts`, error) } } - if ( - Array.isArray(resourceAttachments) && - resourceAttachments.length > 0 && - workspaceId - ) { + if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0 && workspaceId) { const results = await Promise.allSettled( resourceAttachments.map(async (resource) => { const ctx = await resolveActiveResourceContext( @@ -275,36 +233,33 @@ async function resolveAgentContexts(params: { resource.id, workspaceId, userId, - chatId, - ); - if (!ctx) return null; - return { ...ctx, tag: resource.active ? "@active_tab" : "@open_tab" }; - }), - ); + chatId + ) + if (!ctx) return null + return { ...ctx, tag: resource.active ? 
'@active_tab' : '@open_tab' } + }) + ) for (const result of results) { - if (result.status === "fulfilled" && result.value) { - agentContexts.push(result.value); - } else if (result.status === "rejected") { - logger.error( - `[${requestId}] Failed to resolve resource attachment`, - result.reason, - ); + if (result.status === 'fulfilled' && result.value) { + agentContexts.push(result.value) + } else if (result.status === 'rejected') { + logger.error(`[${requestId}] Failed to resolve resource attachment`, result.reason) } } } - return agentContexts; + return agentContexts } async function persistUserMessage(params: { - chatId?: string; - userMessageId: string; - message: string; - fileAttachments?: UnifiedChatRequest["fileAttachments"]; - contexts?: UnifiedChatRequest["contexts"]; - workspaceId?: string; - notifyWorkspaceStatus: boolean; + chatId?: string + userMessageId: string + message: string + fileAttachments?: UnifiedChatRequest['fileAttachments'] + contexts?: UnifiedChatRequest['contexts'] + workspaceId?: string + notifyWorkspaceStatus: boolean /** * Root context for the mothership request. When present the persist * span is created explicitly under it, which avoids relying on @@ -313,7 +268,7 @@ async function persistUserMessage(params: { * store out from under us in dev, which would otherwise leave this * span parented to the about-to-be-dropped Next.js HTTP span. 
*/ - parentOtelContext?: OtelContext; + parentOtelContext?: OtelContext }): Promise { const { chatId, @@ -324,20 +279,20 @@ async function persistUserMessage(params: { workspaceId, notifyWorkspaceStatus, parentOtelContext, - } = params; - if (!chatId) return undefined; + } = params + if (!chatId) return undefined return withCopilotSpan( TraceSpan.CopilotChatPersistUserMessage, { - "db.system": "postgresql", - "db.sql.table": "copilot_chats", - "chat.id": chatId, - "chat.user_message_id": userMessageId, - "chat.message_bytes": message.length, - "chat.file_attachment_count": fileAttachments?.length ?? 0, - "chat.context_count": contexts?.length ?? 0, - ...(workspaceId ? { "workspace.id": workspaceId } : {}), + 'db.system': 'postgresql', + 'db.sql.table': 'copilot_chats', + 'chat.id': chatId, + 'chat.user_message_id': userMessageId, + 'chat.message_bytes': message.length, + 'chat.file_attachment_count': fileAttachments?.length ?? 0, + 'chat.context_count': contexts?.length ?? 0, + ...(workspaceId ? { 'workspace.id': workspaceId } : {}), }, async (span) => { const userMsg = buildPersistedUserMessage({ @@ -345,7 +300,7 @@ async function persistUserMessage(params: { content: message, fileAttachments, contexts, - }); + }) const [updated] = await db .update(copilotChats) @@ -355,64 +310,54 @@ async function persistUserMessage(params: { updatedAt: new Date(), }) .where(eq(copilotChats.id, chatId)) - .returning({ messages: copilotChats.messages }); + .returning({ messages: copilotChats.messages }) - const messagesAfter = Array.isArray(updated?.messages) - ? updated.messages - : undefined; + const messagesAfter = Array.isArray(updated?.messages) ? updated.messages : undefined span.setAttributes({ - "chat.persist.outcome": updated ? "appended" : "chat_not_found", - "chat.messages_after": messagesAfter?.length ?? 0, - }); + 'chat.persist.outcome': updated ? 'appended' : 'chat_not_found', + 'chat.messages_after': messagesAfter?.length ?? 
0, + }) if (notifyWorkspaceStatus && updated && workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: "started", - }); + type: 'started', + }) } - return messagesAfter; + return messagesAfter }, - parentOtelContext, - ); + parentOtelContext + ) } async function buildInitialExecutionContext(params: { - userId: string; - workflowId?: string; - workspaceId?: string; - chatId?: string; - messageId: string; - userTimezone?: string; - requestMode: string; + userId: string + workflowId?: string + workspaceId?: string + chatId?: string + messageId: string + userTimezone?: string + requestMode: string }): Promise { - const { - userId, - workflowId, - workspaceId, - chatId, - messageId, - userTimezone, - requestMode, - } = params; + const { userId, workflowId, workspaceId, chatId, messageId, userTimezone, requestMode } = params if (workflowId && !workspaceId) { - const context = await prepareExecutionContext(userId, workflowId, chatId); + const context = await prepareExecutionContext(userId, workflowId, chatId) return { ...context, messageId, userTimezone, requestMode, copilotToolExecution: true, - }; + } } - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId); + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) return { userId, - workflowId: workflowId ?? "", + workflowId: workflowId ?? 
'', workspaceId, chatId, decryptedEnvVars, @@ -420,26 +365,20 @@ async function buildInitialExecutionContext(params: { userTimezone, requestMode, copilotToolExecution: true, - }; + } } function buildOnComplete(params: { - chatId?: string; - userMessageId: string; - requestId: string; - workspaceId?: string; - notifyWorkspaceStatus: boolean; + chatId?: string + userMessageId: string + requestId: string + workspaceId?: string + notifyWorkspaceStatus: boolean }) { - const { - chatId, - userMessageId, - requestId, - workspaceId, - notifyWorkspaceStatus, - } = params; + const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus } = params return async (result: OrchestratorResult) => { - if (!chatId) return; + if (!chatId) return try { await finalizeAssistantTurn({ @@ -447,75 +386,66 @@ function buildOnComplete(params: { userMessageId, ...(result.success ? { - assistantMessage: buildPersistedAssistantMessage( - result, - requestId, - ), + assistantMessage: buildPersistedAssistantMessage(result, requestId), } : {}), - }); + }) if (notifyWorkspaceStatus && workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: "completed", - }); + type: 'completed', + }) } } catch (error) { logger.error(`[${requestId}] Failed to persist chat messages`, { chatId, - error: error instanceof Error ? error.message : "Unknown error", - }); + error: error instanceof Error ? 
error.message : 'Unknown error', + }) } - }; + } } function buildOnError(params: { - chatId?: string; - userMessageId: string; - requestId: string; - workspaceId?: string; - notifyWorkspaceStatus: boolean; + chatId?: string + userMessageId: string + requestId: string + workspaceId?: string + notifyWorkspaceStatus: boolean }) { - const { - chatId, - userMessageId, - requestId, - workspaceId, - notifyWorkspaceStatus, - } = params; + const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus } = params return async () => { - if (!chatId) return; + if (!chatId) return try { - await finalizeAssistantTurn({ chatId, userMessageId }); + await finalizeAssistantTurn({ chatId, userMessageId }) if (notifyWorkspaceStatus && workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: "completed", - }); + type: 'completed', + }) } } catch (error) { logger.error(`[${requestId}] Failed to finalize errored chat stream`, { chatId, - error: error instanceof Error ? error.message : "Unknown error", - }); + error: error instanceof Error ? 
error.message : 'Unknown error', + }) } - }; + } } async function resolveBranch(params: { - authenticatedUserId: string; - workflowId?: string; - workflowName?: string; - workspaceId?: string; - model?: string; - mode?: UnifiedChatRequest["mode"]; - provider?: string; + authenticatedUserId: string + workflowId?: string + workflowName?: string + workspaceId?: string + model?: string + mode?: UnifiedChatRequest['mode'] + provider?: string }): Promise { const { authenticatedUserId, @@ -525,40 +455,40 @@ async function resolveBranch(params: { model, mode, provider, - } = params; + } = params if (providedWorkflowId || workflowName) { const resolved = await resolveWorkflowIdForUser( authenticatedUserId, providedWorkflowId, workflowName, - requestedWorkspaceId, - ); - if (resolved.status !== "resolved") { - return createBadRequestResponse(resolved.message); + requestedWorkspaceId + ) + if (resolved.status !== 'resolved') { + return createBadRequestResponse(resolved.message) } - const resolvedWorkflowId = resolved.workflowId; - let resolvedWorkspaceId = requestedWorkspaceId; + const resolvedWorkflowId = resolved.workflowId + let resolvedWorkspaceId = requestedWorkspaceId if (!resolvedWorkspaceId) { try { - const workflow = await getWorkflowById(resolvedWorkflowId); - resolvedWorkspaceId = workflow?.workspaceId ?? undefined; + const workflow = await getWorkflowById(resolvedWorkflowId) + resolvedWorkspaceId = workflow?.workspaceId ?? undefined } catch { // best effort; downstream calls can still proceed } } - const selectedModel = model || DEFAULT_MODEL; + const selectedModel = model || DEFAULT_MODEL return { - kind: "workflow", + kind: 'workflow', workflowId: resolvedWorkflowId, workflowName: resolved.workflowName, workspaceId: resolvedWorkspaceId, selectedModel, - mode: mode ?? "agent", + mode: mode ?? 
'agent', provider, - goRoute: "/api/copilot", + goRoute: '/api/copilot', titleModel: selectedModel, titleProvider: provider, notifyWorkspaceStatus: false, @@ -571,7 +501,7 @@ async function resolveBranch(params: { workspaceId: payloadParams.workspaceId, userId: payloadParams.userId, userMessageId: payloadParams.userMessageId, - mode: payloadParams.mode ?? "agent", + mode: payloadParams.mode ?? 'agent', model: selectedModel, provider: payloadParams.provider, contexts: payloadParams.contexts, @@ -583,14 +513,9 @@ async function resolveBranch(params: { userPermission: payloadParams.userPermission, userTimezone: payloadParams.userTimezone, }, - { selectedModel }, + { selectedModel } ), - buildExecutionContext: async ({ - userId, - chatId, - userTimezone, - messageId, - }) => + buildExecutionContext: async ({ userId, chatId, userTimezone, messageId }) => buildInitialExecutionContext({ userId, workflowId: resolvedWorkflowId, @@ -598,21 +523,19 @@ async function resolveBranch(params: { chatId, messageId, userTimezone, - requestMode: mode ?? "agent", + requestMode: mode ?? 
'agent', }), - }; + } } if (!requestedWorkspaceId) { - return createBadRequestResponse( - "workspaceId is required when workflowId is not provided", - ); + return createBadRequestResponse('workspaceId is required when workflowId is not provided') } return { - kind: "workspace", + kind: 'workspace', workspaceId: requestedWorkspaceId, - goRoute: "/api/mothership", + goRoute: '/api/mothership', titleModel: DEFAULT_MODEL, notifyWorkspaceStatus: true, buildPayload: async (payloadParams) => @@ -622,8 +545,8 @@ async function resolveBranch(params: { workspaceId: requestedWorkspaceId, userId: payloadParams.userId, userMessageId: payloadParams.userMessageId, - mode: "agent", - model: "", + mode: 'agent', + model: '', contexts: payloadParams.contexts, fileAttachments: payloadParams.fileAttachments, chatId: payloadParams.chatId, @@ -631,58 +554,53 @@ async function resolveBranch(params: { userPermission: payloadParams.userPermission, userTimezone: payloadParams.userTimezone, }, - { selectedModel: "" }, + { selectedModel: '' } ), - buildExecutionContext: async ({ - userId, - chatId, - userTimezone, - messageId, - }) => + buildExecutionContext: async ({ userId, chatId, userTimezone, messageId }) => buildInitialExecutionContext({ userId, workspaceId: requestedWorkspaceId, chatId, messageId, userTimezone, - requestMode: "agent", + requestMode: 'agent', }), - }; + } } export async function handleUnifiedChatPost(req: NextRequest) { - const tracker = createRequestTracker(false); - let actualChatId: string | undefined; - let userMessageId = ""; - let chatStreamLockAcquired = false; + const tracker = createRequestTracker(false) + let actualChatId: string | undefined + let userMessageId = '' + let chatStreamLockAcquired = false // Started once we know the streamId (= userMessageId). Every subsequent // span (persistUserMessage, createRunSegment, the whole SSE stream, etc.) 
// nests under this root via AsyncLocalStorage / explicit propagation, // and the stream's terminal code path calls finish() when the request // actually ends. Errors thrown from the handler before the stream // starts are finished here in the catch below. - let otelRoot: ReturnType | undefined; - const executionId = crypto.randomUUID(); - const runId = crypto.randomUUID(); + let otelRoot: ReturnType | undefined + const executionId = crypto.randomUUID() + const runId = crypto.randomUUID() try { - const session = await getSession(); + const session = await getSession() if (!session?.user?.id) { - return createUnauthorizedResponse(); + return createUnauthorizedResponse() } - const authenticatedUserId = session.user.id; + const authenticatedUserId = session.user.id - const body = ChatMessageSchema.parse(await req.json()); - const normalizedContexts = normalizeContexts(body.contexts); - userMessageId = body.userMessageId || crypto.randomUUID(); + const body = ChatMessageSchema.parse(await req.json()) + const normalizedContexts = normalizeContexts(body.contexts) + userMessageId = body.userMessageId || crypto.randomUUID() otelRoot = startCopilotOtelRoot({ requestId: tracker.requestId, streamId: userMessageId, executionId, runId, - transport: "stream", - }); + transport: 'stream', + }) // Wrap the rest of the handler so every nested withCopilotSpan / // withDbSpan (persistUserMessage, createRunSegment, resolveBranch DB @@ -698,39 +616,34 @@ export async function handleUnifiedChatPost(req: NextRequest) { model: body.model, mode: body.mode, provider: body.provider, - }); + }) if (branch instanceof NextResponse) { - return branch; + return branch } - let currentChat: ChatLoadResult["chat"] = null; - let conversationHistory: unknown[] = []; - let chatIsNew = false; - actualChatId = body.chatId; + let currentChat: ChatLoadResult['chat'] = null + let conversationHistory: unknown[] = [] + let chatIsNew = false + actualChatId = body.chatId if (body.chatId || body.createNewChat) { 
const chatResult = await resolveOrCreateChat({ chatId: body.chatId, userId: authenticatedUserId, - ...(branch.kind === "workflow" - ? { workflowId: branch.workflowId } - : {}), + ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), workspaceId: branch.workspaceId, model: branch.titleModel, - type: branch.kind === "workflow" ? "copilot" : "mothership", - }); - currentChat = chatResult.chat; - actualChatId = chatResult.chatId || body.chatId; - chatIsNew = chatResult.isNew; + type: branch.kind === 'workflow' ? 'copilot' : 'mothership', + }) + currentChat = chatResult.chat + actualChatId = chatResult.chatId || body.chatId + chatIsNew = chatResult.isNew conversationHistory = Array.isArray(chatResult.conversationHistory) ? chatResult.conversationHistory - : []; + : [] if (body.chatId && !currentChat) { - return NextResponse.json( - { error: "Chat not found" }, - { status: 404 }, - ); + return NextResponse.json({ error: 'Chat not found' }, { status: 404 }) } } @@ -741,45 +654,38 @@ export async function handleUnifiedChatPost(req: NextRequest) { type: r.type, id: r.id, title: r.title ?? GENERIC_RESOURCE_TITLE[r.type], - })), - ); + })) + ) } if (actualChatId) { - chatStreamLockAcquired = await acquirePendingChatStream( - actualChatId, - userMessageId, - ); + chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId) if (!chatStreamLockAcquired) { - const activeStreamId = await getPendingChatStreamId(actualChatId); + const activeStreamId = await getPendingChatStreamId(actualChatId) return NextResponse.json( { - error: "A response is already in progress for this chat.", + error: 'A response is already in progress for this chat.', ...(activeStreamId ? { activeStreamId } : {}), }, - { status: 409 }, - ); + { status: 409 } + ) } } - const workspaceId = branch.workspaceId; + const workspaceId = branch.workspaceId const userPermissionPromise = workspaceId - ? 
getUserEntityPermissions( - authenticatedUserId, - "workspace", - workspaceId, - ).catch((error) => { - logger.warn("Failed to load user permissions", { + ? getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch((error) => { + logger.warn('Failed to load user permissions', { error: error instanceof Error ? error.message : String(error), workspaceId, - }); - return null; + }) + return null }) - : Promise.resolve(null); + : Promise.resolve(null) const workspaceContextPromise = - branch.kind === "workspace" + branch.kind === 'workspace' ? generateWorkspaceContext(branch.workspaceId, authenticatedUserId) - : Promise.resolve(undefined); + : Promise.resolve(undefined) const agentContextsPromise = resolveAgentContexts({ contexts: normalizedContexts, resourceAttachments: body.resourceAttachments, @@ -788,7 +694,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { workspaceId, chatId: actualChatId, requestId: tracker.requestId, - }); + }) const persistedMessagesPromise = persistUserMessage({ chatId: actualChatId, userMessageId, @@ -798,37 +704,32 @@ export async function handleUnifiedChatPost(req: NextRequest) { workspaceId, notifyWorkspaceStatus: branch.notifyWorkspaceStatus, parentOtelContext: otelRoot!.context, - }); + }) const executionContextPromise = branch.buildExecutionContext({ userId: authenticatedUserId, chatId: actualChatId, userTimezone: body.userTimezone, messageId: userMessageId, - }); - - const [ - agentContexts, - userPermission, - workspaceContext, - persistedMessages, - executionContext, - ] = await Promise.all([ - agentContextsPromise, - userPermissionPromise, - workspaceContextPromise, - persistedMessagesPromise, - executionContextPromise, - ]); + }) + + const [agentContexts, userPermission, workspaceContext, persistedMessages, executionContext] = + await Promise.all([ + agentContextsPromise, + userPermissionPromise, + workspaceContextPromise, + persistedMessagesPromise, + executionContextPromise, + ]) if 
(persistedMessages) { conversationHistory = persistedMessages.filter((message) => { - const record = message as Record; - return record.id !== userMessageId; - }); + const record = message as Record + return record.id !== userMessageId + }) } const requestPayload = - branch.kind === "workflow" + branch.kind === 'workflow' ? await branch.buildPayload({ message: body.message, userId: authenticatedUserId, @@ -857,13 +758,13 @@ export async function handleUnifiedChatPost(req: NextRequest) { userPermission: userPermission ?? undefined, userTimezone: body.userTimezone, workspaceContext, - }); + }) if (actualChatId) { - otelRoot!.span.setAttribute("chat.id", actualChatId); + otelRoot!.span.setAttribute('chat.id', actualChatId) } if (workspaceId) { - otelRoot!.span.setAttribute("workspace.id", workspaceId); + otelRoot!.span.setAttribute('workspace.id', workspaceId) } const stream = createSSEStream({ @@ -877,20 +778,14 @@ export async function handleUnifiedChatPost(req: NextRequest) { isNewChat: conversationHistory.length === 0, message: body.message, titleModel: branch.titleModel, - ...(branch.titleProvider - ? { titleProvider: branch.titleProvider } - : {}), + ...(branch.titleProvider ? { titleProvider: branch.titleProvider } : {}), requestId: tracker.requestId, workspaceId, otelRoot: otelRoot!, orchestrateOptions: { userId: authenticatedUserId, - ...(branch.kind === "workflow" - ? { workflowId: branch.workflowId } - : {}), - ...(branch.kind === "workspace" - ? { workspaceId: branch.workspaceId } - : {}), + ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), + ...(branch.kind === 'workspace' ? 
{ workspaceId: branch.workspaceId } : {}), chatId: actualChatId, executionId, runId, @@ -913,33 +808,33 @@ export async function handleUnifiedChatPost(req: NextRequest) { notifyWorkspaceStatus: branch.notifyWorkspaceStatus, }), }, - }); + }) - return new Response(stream, { headers: SSE_RESPONSE_HEADERS }); - }); // end otelContextApi.with + return new Response(stream, { headers: SSE_RESPONSE_HEADERS }) + }) // end otelContextApi.with } catch (error) { if (chatStreamLockAcquired && actualChatId && userMessageId) { - await releasePendingChatStream(actualChatId, userMessageId); + await releasePendingChatStream(actualChatId, userMessageId) } - otelRoot?.finish("error", error); + otelRoot?.finish('error', error) if (error instanceof z.ZodError) { return NextResponse.json( - { error: "Invalid request data", details: error.errors }, - { status: 400 }, - ); + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) } logger.error(`[${tracker.requestId}] Error handling unified chat request`, { - error: error instanceof Error ? error.message : "Unknown error", + error: error instanceof Error ? error.message : 'Unknown error', stack: error instanceof Error ? error.stack : undefined, - }); + }) return NextResponse.json( { - error: error instanceof Error ? error.message : "Internal server error", + error: error instanceof Error ? error.message : 'Internal server error', }, - { status: 500 }, - ); + { status: 500 } + ) } } diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index f5c121239c6..221a1885557 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -75,8 +75,8 @@ export async function finalizeAssistantTurn({ ? alreadyHasResponse ? 
'assistant_already_persisted' : 'stale_user_message' - : 'cleared_stream_marker_only', + : 'cleared_stream_marker_only' ) - }, + } ) } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts index b702b8de3db..1c670b37b54 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts @@ -5,1849 +5,1421 @@ export type JsonSchema = unknown export const MOTHERSHIP_STREAM_V1_SCHEMA: JsonSchema = { - "$defs": { - "MothershipStreamV1AdditionalPropertiesMap": { - "additionalProperties": true, - "type": "object" - }, - "MothershipStreamV1AsyncToolRecordStatus": { - "enum": [ - "pending", - "running", - "completed", - "failed", - "cancelled", - "delivered" - ], - "type": "string" - }, - "MothershipStreamV1CheckpointPauseEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CheckpointPausePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + $defs: { + MothershipStreamV1AdditionalPropertiesMap: { + additionalProperties: true, + type: 'object', + }, + MothershipStreamV1AsyncToolRecordStatus: { + enum: ['pending', 'running', 'completed', 'failed', 'cancelled', 'delivered'], + type: 'string', + }, + MothershipStreamV1CheckpointPauseEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CheckpointPausePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: 
'#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CheckpointPauseFrame": { - "additionalProperties": false, - "properties": { - "parentToolCallId": { - "type": "string" - }, - "parentToolName": { - "type": "string" - }, - "pendingToolIds": { - "items": { - "type": "string" + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CheckpointPauseFrame: { + additionalProperties: false, + properties: { + parentToolCallId: { + type: 'string', + }, + parentToolName: { + type: 'string', + }, + pendingToolIds: { + items: { + type: 'string', }, - "type": "array" - } + type: 'array', + }, }, - "required": [ - "parentToolCallId", - "parentToolName", - "pendingToolIds" - ], - "type": "object" - }, - "MothershipStreamV1CheckpointPausePayload": { - "additionalProperties": false, - "properties": { - "checkpointId": { - "type": "string" - }, - "executionId": { - "type": "string" - }, - "frames": { - "items": { - "$ref": "#/$defs/MothershipStreamV1CheckpointPauseFrame" + required: ['parentToolCallId', 'parentToolName', 'pendingToolIds'], + type: 'object', + }, + MothershipStreamV1CheckpointPausePayload: { + additionalProperties: false, + properties: { + checkpointId: { + type: 'string', + }, + executionId: { + type: 'string', + }, + frames: { + items: { + $ref: '#/$defs/MothershipStreamV1CheckpointPauseFrame', }, - "type": "array" + type: 'array', }, - "kind": { - "enum": [ - "checkpoint_pause" - ], - "type": "string" + kind: { + enum: ['checkpoint_pause'], + type: 'string', }, - "pendingToolCallIds": { - "items": { - "type": "string" + pendingToolCallIds: { + items: { + type: 'string', }, - "type": "array" + type: 'array', + }, + 
runId: { + type: 'string', }, - "runId": { - "type": "string" - } }, - "required": [ - "kind", - "checkpointId", - "runId", - "executionId", - "pendingToolCallIds" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDoneData": { - "additionalProperties": false, - "properties": { - "summary_chars": { - "type": "integer" - } + required: ['kind', 'checkpointId', 'runId', 'executionId', 'pendingToolCallIds'], + type: 'object', + }, + MothershipStreamV1CompactionDoneData: { + additionalProperties: false, + properties: { + summary_chars: { + type: 'integer', + }, }, - "required": [ - "summary_chars" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDoneEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompactionDonePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['summary_chars'], + type: 'object', + }, + MothershipStreamV1CompactionDoneEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompactionDonePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDonePayload": { - "additionalProperties": false, - "properties": { - "data": { - 
"$ref": "#/$defs/MothershipStreamV1CompactionDoneData" - }, - "kind": { - "enum": [ - "compaction_done" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CompactionDonePayload: { + additionalProperties: false, + properties: { + data: { + $ref: '#/$defs/MothershipStreamV1CompactionDoneData', + }, + kind: { + enum: ['compaction_done'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1CompactionStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompactionStartPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1CompactionStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompactionStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompactionStartPayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "compaction_start" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + 
MothershipStreamV1CompactionStartPayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['compaction_start'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1CompleteEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompletePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "complete" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1CompleteEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompletePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['complete'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompletePayload": { - "additionalProperties": false, - "properties": { - "cost": { - "$ref": "#/$defs/MothershipStreamV1CostData" - }, - "reason": { - "type": "string" - }, - "response": true, - "status": { - "$ref": "#/$defs/MothershipStreamV1CompletionStatus" - }, - "usage": { - "$ref": "#/$defs/MothershipStreamV1UsageData" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CompletePayload: { + additionalProperties: false, + properties: { + cost: { + $ref: 
'#/$defs/MothershipStreamV1CostData', + }, + reason: { + type: 'string', + }, + response: true, + status: { + $ref: '#/$defs/MothershipStreamV1CompletionStatus', + }, + usage: { + $ref: '#/$defs/MothershipStreamV1UsageData', + }, }, - "required": [ - "status" - ], - "type": "object" - }, - "MothershipStreamV1CompletionStatus": { - "enum": [ - "complete", - "error", - "cancelled" - ], - "type": "string" - }, - "MothershipStreamV1CostData": { - "additionalProperties": false, - "properties": { - "input": { - "type": "number" - }, - "output": { - "type": "number" - }, - "total": { - "type": "number" - } + required: ['status'], + type: 'object', + }, + MothershipStreamV1CompletionStatus: { + enum: ['complete', 'error', 'cancelled'], + type: 'string', + }, + MothershipStreamV1CostData: { + additionalProperties: false, + properties: { + input: { + type: 'number', + }, + output: { + type: 'number', + }, + total: { + type: 'number', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1ErrorEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ErrorPayload" + MothershipStreamV1ErrorEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ErrorPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" + seq: { + type: 'integer', }, - "seq": { - "type": "integer" + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" + ts: { + type: 'string', }, - "ts": { - "type": "string" + type: { + enum: ['error'], + type: 'string', }, - "type": { - "enum": [ - "error" - ], - "type": "string" + v: { + enum: [1], + type: 'integer', }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } }, - 
"required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ErrorPayload": { - "additionalProperties": false, - "properties": { - "code": { - "type": "string" - }, - "data": true, - "displayMessage": { - "type": "string" - }, - "error": { - "type": "string" - }, - "message": { - "type": "string" - }, - "provider": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ErrorPayload: { + additionalProperties: false, + properties: { + code: { + type: 'string', + }, + data: true, + displayMessage: { + type: 'string', + }, + error: { + type: 'string', + }, + message: { + type: 'string', + }, + provider: { + type: 'string', + }, }, - "required": [ - "message" - ], - "type": "object" - }, - "MothershipStreamV1EventEnvelopeCommon": { - "additionalProperties": false, - "properties": { - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['message'], + type: 'object', + }, + MothershipStreamV1EventEnvelopeCommon: { + additionalProperties: false, + properties: { + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream" - ], - "type": "object" - }, - "MothershipStreamV1EventType": { - "enum": [ - "session", - "text", - "tool", - "span", - "resource", - "run", - "error", - "complete" - ], - "type": "string" - }, - "MothershipStreamV1ResourceDescriptor": { - "additionalProperties": 
false, - "properties": { - "id": { - "type": "string" - }, - "title": { - "type": "string" - }, - "type": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream'], + type: 'object', + }, + MothershipStreamV1EventType: { + enum: ['session', 'text', 'tool', 'span', 'resource', 'run', 'error', 'complete'], + type: 'string', + }, + MothershipStreamV1ResourceDescriptor: { + additionalProperties: false, + properties: { + id: { + type: 'string', + }, + title: { + type: 'string', + }, + type: { + type: 'string', + }, }, - "required": [ - "type", - "id" - ], - "type": "object" - }, - "MothershipStreamV1ResourceOp": { - "enum": [ - "upsert", - "remove" - ], - "type": "string" - }, - "MothershipStreamV1ResourceRemoveEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ResourceRemovePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "resource" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['type', 'id'], + type: 'object', + }, + MothershipStreamV1ResourceOp: { + enum: ['upsert', 'remove'], + type: 'string', + }, + MothershipStreamV1ResourceRemoveEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ResourceRemovePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['resource'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - 
"payload" - ], - "type": "object" - }, - "MothershipStreamV1ResourceRemovePayload": { - "additionalProperties": false, - "properties": { - "op": { - "enum": [ - "remove" - ], - "type": "string" - }, - "resource": { - "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ResourceRemovePayload: { + additionalProperties: false, + properties: { + op: { + enum: ['remove'], + type: 'string', + }, + resource: { + $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', + }, }, - "required": [ - "op", - "resource" - ], - "type": "object" - }, - "MothershipStreamV1ResourceUpsertEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ResourceUpsertPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "resource" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['op', 'resource'], + type: 'object', + }, + MothershipStreamV1ResourceUpsertEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ResourceUpsertPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['resource'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ResourceUpsertPayload": { - "additionalProperties": false, - 
"properties": { - "op": { - "enum": [ - "upsert" - ], - "type": "string" - }, - "resource": { - "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ResourceUpsertPayload: { + additionalProperties: false, + properties: { + op: { + enum: ['upsert'], + type: 'string', + }, + resource: { + $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', + }, }, - "required": [ - "op", - "resource" - ], - "type": "object" - }, - "MothershipStreamV1ResumeRequest": { - "additionalProperties": false, - "properties": { - "checkpointId": { - "type": "string" - }, - "results": { - "items": { - "$ref": "#/$defs/MothershipStreamV1ResumeToolResult" + required: ['op', 'resource'], + type: 'object', + }, + MothershipStreamV1ResumeRequest: { + additionalProperties: false, + properties: { + checkpointId: { + type: 'string', + }, + results: { + items: { + $ref: '#/$defs/MothershipStreamV1ResumeToolResult', }, - "type": "array" + type: 'array', + }, + streamId: { + type: 'string', }, - "streamId": { - "type": "string" - } }, - "required": [ - "streamId", - "checkpointId", - "results" - ], - "type": "object" - }, - "MothershipStreamV1ResumeToolResult": { - "additionalProperties": false, - "properties": { - "error": { - "type": "string" - }, - "output": true, - "success": { - "type": "boolean" - }, - "toolCallId": { - "type": "string" - } + required: ['streamId', 'checkpointId', 'results'], + type: 'object', + }, + MothershipStreamV1ResumeToolResult: { + additionalProperties: false, + properties: { + error: { + type: 'string', + }, + output: true, + success: { + type: 'boolean', + }, + toolCallId: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "success" - ], - "type": "object" - }, - "MothershipStreamV1RunKind": { - "enum": [ - "checkpoint_pause", - "resumed", - "compaction_start", - "compaction_done" - ], - "type": "string" - }, - 
"MothershipStreamV1RunResumedEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1RunResumedPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['toolCallId', 'success'], + type: 'object', + }, + MothershipStreamV1RunKind: { + enum: ['checkpoint_pause', 'resumed', 'compaction_start', 'compaction_done'], + type: 'string', + }, + MothershipStreamV1RunResumedEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1RunResumedPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1RunResumedPayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "resumed" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1RunResumedPayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['resumed'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SessionChatEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionChatPayload" - }, - 
"scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SessionChatEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionChatPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionChatPayload": { - "additionalProperties": false, - "properties": { - "chatId": { - "type": "string" - }, - "kind": { - "enum": [ - "chat" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionChatPayload: { + additionalProperties: false, + properties: { + chatId: { + type: 'string', + }, + kind: { + enum: ['chat'], + type: 'string', + }, }, - "required": [ - "kind", - "chatId" - ], - "type": "object" - }, - "MothershipStreamV1SessionKind": { - "enum": [ - "trace", - "chat", - "title", - "start" - ], - "type": "string" - }, - "MothershipStreamV1SessionStartData": { - "additionalProperties": false, - "properties": { - "responseId": { - "type": "string" - } + required: ['kind', 'chatId'], + type: 'object', + }, + MothershipStreamV1SessionKind: { + enum: ['trace', 'chat', 'title', 'start'], + type: 'string', + }, + 
MothershipStreamV1SessionStartData: { + additionalProperties: false, + properties: { + responseId: { + type: 'string', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1SessionStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionStartPayload" + MothershipStreamV1SessionStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" + seq: { + type: 'integer', }, - "seq": { - "type": "integer" + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" + ts: { + type: 'string', }, - "ts": { - "type": "string" + type: { + enum: ['session'], + type: 'string', }, - "type": { - "enum": [ - "session" - ], - "type": "string" + v: { + enum: [1], + type: 'integer', }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionStartPayload": { - "additionalProperties": false, - "properties": { - "data": { - "$ref": "#/$defs/MothershipStreamV1SessionStartData" - }, - "kind": { - "enum": [ - "start" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionStartPayload: { + additionalProperties: false, + properties: { + data: { + $ref: '#/$defs/MothershipStreamV1SessionStartData', + }, + kind: { + enum: ['start'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SessionTitleEventEnvelope": { - "additionalProperties": false, - "properties": { - 
"payload": { - "$ref": "#/$defs/MothershipStreamV1SessionTitlePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SessionTitleEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionTitlePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionTitlePayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "title" - ], - "type": "string" - }, - "title": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionTitlePayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['title'], + type: 'string', + }, + title: { + type: 'string', + }, }, - "required": [ - "kind", - "title" - ], - "type": "object" - }, - "MothershipStreamV1SessionTraceEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionTracePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": 
"#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind', 'title'], + type: 'object', + }, + MothershipStreamV1SessionTraceEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionTracePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionTracePayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "trace" - ], - "type": "string" - }, - "requestId": { - "type": "string" - }, - "spanId": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionTracePayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['trace'], + type: 'string', + }, + requestId: { + type: 'string', + }, + spanId: { + type: 'string', + }, }, - "required": [ - "kind", - "requestId" - ], - "type": "object" - }, - "MothershipStreamV1SpanKind": { - "enum": [ - "subagent" - ], - "type": "string" - }, - "MothershipStreamV1SpanLifecycleEvent": { - "enum": [ - "start", - "end" - ], - "type": "string" - }, - "MothershipStreamV1SpanPayloadKind": { - "enum": [ - "subagent", - "structured_result", - "subagent_result" - ], - "type": "string" - }, - "MothershipStreamV1StreamCursor": { - "additionalProperties": false, - "properties": { - "cursor": { - "type": "string" 
- }, - "seq": { - "type": "integer" - }, - "streamId": { - "type": "string" - } + required: ['kind', 'requestId'], + type: 'object', + }, + MothershipStreamV1SpanKind: { + enum: ['subagent'], + type: 'string', + }, + MothershipStreamV1SpanLifecycleEvent: { + enum: ['start', 'end'], + type: 'string', + }, + MothershipStreamV1SpanPayloadKind: { + enum: ['subagent', 'structured_result', 'subagent_result'], + type: 'string', + }, + MothershipStreamV1StreamCursor: { + additionalProperties: false, + properties: { + cursor: { + type: 'string', + }, + seq: { + type: 'integer', + }, + streamId: { + type: 'string', + }, }, - "required": [ - "streamId", - "cursor", - "seq" - ], - "type": "object" - }, - "MothershipStreamV1StreamRef": { - "additionalProperties": false, - "properties": { - "chatId": { - "type": "string" - }, - "cursor": { - "type": "string" - }, - "streamId": { - "type": "string" - } + required: ['streamId', 'cursor', 'seq'], + type: 'object', + }, + MothershipStreamV1StreamRef: { + additionalProperties: false, + properties: { + chatId: { + type: 'string', + }, + cursor: { + type: 'string', + }, + streamId: { + type: 'string', + }, }, - "required": [ - "streamId" - ], - "type": "object" - }, - "MothershipStreamV1StreamScope": { - "additionalProperties": false, - "properties": { - "agentId": { - "type": "string" - }, - "lane": { - "enum": [ - "subagent" - ], - "type": "string" - }, - "parentToolCallId": { - "type": "string" - } + required: ['streamId'], + type: 'object', + }, + MothershipStreamV1StreamScope: { + additionalProperties: false, + properties: { + agentId: { + type: 'string', + }, + lane: { + enum: ['subagent'], + type: 'string', + }, + parentToolCallId: { + type: 'string', + }, }, - "required": [ - "lane" - ], - "type": "object" - }, - "MothershipStreamV1StructuredResultSpanEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanPayload" - }, - "scope": { - "$ref": 
"#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['lane'], + type: 'object', + }, + MothershipStreamV1StructuredResultSpanEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1StructuredResultSpanPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1StructuredResultSpanPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "kind": { - "enum": [ - "structured_result" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1StructuredResultSpanPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + kind: { + enum: ['structured_result'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SubagentResultSpanEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - 
"trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SubagentResultSpanEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentResultSpanPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SubagentResultSpanPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "kind": { - "enum": [ - "subagent_result" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentResultSpanPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + kind: { + enum: ['subagent_result'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanEndEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": 
[ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SubagentSpanEndEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentSpanEndPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanEndPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "event": { - "enum": [ - "end" - ], - "type": "string" - }, - "kind": { - "enum": [ - "subagent" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentSpanEndPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + event: { + enum: ['end'], + type: 'string', + }, + kind: { + enum: ['subagent'], + type: 'string', + }, }, - "required": [ - "kind", - "event" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind', 'event'], + type: 
'object', + }, + MothershipStreamV1SubagentSpanStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentSpanStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanStartPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "event": { - "enum": [ - "start" - ], - "type": "string" - }, - "kind": { - "enum": [ - "subagent" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentSpanStartPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + event: { + enum: ['start'], + type: 'string', + }, + kind: { + enum: ['subagent'], + type: 'string', + }, }, - "required": [ - "kind", - "event" - ], - "type": "object" - }, - "MothershipStreamV1TextChannel": { - "enum": [ - "assistant", - "thinking" - ], - "type": "string" - }, - "MothershipStreamV1TextEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1TextPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "text" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: 
['kind', 'event'], + type: 'object', + }, + MothershipStreamV1TextChannel: { + enum: ['assistant', 'thinking'], + type: 'string', + }, + MothershipStreamV1TextEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1TextPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['text'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1TextPayload": { - "additionalProperties": false, - "properties": { - "channel": { - "$ref": "#/$defs/MothershipStreamV1TextChannel" - }, - "text": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1TextPayload: { + additionalProperties: false, + properties: { + channel: { + $ref: '#/$defs/MothershipStreamV1TextChannel', + }, + text: { + type: 'string', + }, }, - "required": [ - "channel", - "text" - ], - "type": "object" - }, - "MothershipStreamV1ToolArgsDeltaEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['channel', 'text'], + type: 'object', + }, + MothershipStreamV1ToolArgsDeltaEventEnvelope: { + additionalProperties: false, + properties: { + 
payload: { + $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolArgsDeltaPayload": { - "additionalProperties": false, - "properties": { - "argumentsDelta": { - "type": "string" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "phase": { - "enum": [ - "args_delta" - ], - "type": "string" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolArgsDeltaPayload: { + additionalProperties: false, + properties: { + argumentsDelta: { + type: 'string', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + phase: { + enum: ['args_delta'], + type: 'string', + }, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "toolName", - "argumentsDelta", - "executor", - "mode", - "phase" - ], - "type": "object" - }, - "MothershipStreamV1ToolCallDescriptor": { - "additionalProperties": false, - "properties": { - "arguments": { - "$ref": "#/$defs/MothershipStreamV1AdditionalPropertiesMap" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "partial": { - "type": "boolean" - }, - "phase": { - "enum": [ - "call" - ], - "type": "string" - }, - 
"requiresConfirmation": { - "type": "boolean" - }, - "status": { - "$ref": "#/$defs/MothershipStreamV1ToolStatus" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - }, - "ui": { - "$ref": "#/$defs/MothershipStreamV1ToolUI" - } + required: ['toolCallId', 'toolName', 'argumentsDelta', 'executor', 'mode', 'phase'], + type: 'object', + }, + MothershipStreamV1ToolCallDescriptor: { + additionalProperties: false, + properties: { + arguments: { + $ref: '#/$defs/MothershipStreamV1AdditionalPropertiesMap', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + partial: { + type: 'boolean', + }, + phase: { + enum: ['call'], + type: 'string', + }, + requiresConfirmation: { + type: 'boolean', + }, + status: { + $ref: '#/$defs/MothershipStreamV1ToolStatus', + }, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, + ui: { + $ref: '#/$defs/MothershipStreamV1ToolUI', + }, }, - "required": [ - "toolCallId", - "toolName", - "executor", - "mode", - "phase" - ], - "type": "object" - }, - "MothershipStreamV1ToolCallEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolCallDescriptor" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase'], + type: 'object', + }, + MothershipStreamV1ToolCallEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ToolCallDescriptor', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, 
+ seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolExecutor": { - "enum": [ - "go", - "sim", - "client" - ], - "type": "string" - }, - "MothershipStreamV1ToolMode": { - "enum": [ - "sync", - "async" - ], - "type": "string" - }, - "MothershipStreamV1ToolOutcome": { - "enum": [ - "success", - "error", - "cancelled", - "skipped", - "rejected" - ], - "type": "string" - }, - "MothershipStreamV1ToolPhase": { - "enum": [ - "call", - "args_delta", - "result" - ], - "type": "string" - }, - "MothershipStreamV1ToolResultEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolResultPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolExecutor: { + enum: ['go', 'sim', 'client'], + type: 'string', + }, + MothershipStreamV1ToolMode: { + enum: ['sync', 'async'], + type: 'string', + }, + MothershipStreamV1ToolOutcome: { + enum: ['success', 'error', 'cancelled', 'skipped', 'rejected'], + type: 'string', + }, + MothershipStreamV1ToolPhase: { + enum: ['call', 'args_delta', 'result'], + type: 'string', + }, + MothershipStreamV1ToolResultEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: 
'#/$defs/MothershipStreamV1ToolResultPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolResultPayload": { - "additionalProperties": false, - "properties": { - "error": { - "type": "string" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "output": true, - "phase": { - "enum": [ - "result" - ], - "type": "string" - }, - "status": { - "$ref": "#/$defs/MothershipStreamV1ToolStatus" - }, - "success": { - "type": "boolean" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolResultPayload: { + additionalProperties: false, + properties: { + error: { + type: 'string', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + output: true, + phase: { + enum: ['result'], + type: 'string', + }, + status: { + $ref: '#/$defs/MothershipStreamV1ToolStatus', + }, + success: { + type: 'boolean', + }, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "toolName", - "executor", - "mode", - "phase", - "success" - ], - "type": "object" - }, - "MothershipStreamV1ToolStatus": { - "enum": [ - "generating", - "executing", - "success", - "error", - "cancelled", - "skipped", - "rejected" - ], - "type": "string" - }, - "MothershipStreamV1ToolUI": { - "additionalProperties": false, - 
"properties": { - "clientExecutable": { - "type": "boolean" - }, - "hidden": { - "type": "boolean" - }, - "icon": { - "type": "string" - }, - "internal": { - "type": "boolean" - }, - "phaseLabel": { - "type": "string" - }, - "requiresConfirmation": { - "type": "boolean" - }, - "title": { - "type": "string" - } + required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase', 'success'], + type: 'object', + }, + MothershipStreamV1ToolStatus: { + enum: ['generating', 'executing', 'success', 'error', 'cancelled', 'skipped', 'rejected'], + type: 'string', + }, + MothershipStreamV1ToolUI: { + additionalProperties: false, + properties: { + clientExecutable: { + type: 'boolean', + }, + hidden: { + type: 'boolean', + }, + icon: { + type: 'string', + }, + internal: { + type: 'boolean', + }, + phaseLabel: { + type: 'string', + }, + requiresConfirmation: { + type: 'boolean', + }, + title: { + type: 'string', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1Trace": { - "additionalProperties": false, - "properties": { - "goTraceId": { - "description": "OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity.", - "type": "string" + MothershipStreamV1Trace: { + additionalProperties: false, + properties: { + goTraceId: { + description: + 'OTel trace ID from the first Go ingress. 
May differ from requestId when Sim assigns the canonical request identity.', + type: 'string', }, - "requestId": { - "type": "string" + requestId: { + type: 'string', + }, + spanId: { + type: 'string', }, - "spanId": { - "type": "string" - } }, - "required": [ - "requestId" - ], - "type": "object" + required: ['requestId'], + type: 'object', }, - "MothershipStreamV1UsageData": { - "additionalProperties": false, - "properties": { - "cache_creation_input_tokens": { - "type": "integer" + MothershipStreamV1UsageData: { + additionalProperties: false, + properties: { + cache_creation_input_tokens: { + type: 'integer', + }, + cache_read_input_tokens: { + type: 'integer', }, - "cache_read_input_tokens": { - "type": "integer" + input_tokens: { + type: 'integer', }, - "input_tokens": { - "type": "integer" + model: { + type: 'string', }, - "model": { - "type": "string" + output_tokens: { + type: 'integer', }, - "output_tokens": { - "type": "integer" + total_tokens: { + type: 'integer', }, - "total_tokens": { - "type": "integer" - } }, - "type": "object" - } + type: 'object', + }, }, - "$id": "mothership-stream-v1.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "Shared execution-oriented mothership stream contract from Go to Sim.", - "oneOf": [ + $id: 'mothership-stream-v1.schema.json', + $schema: 'https://json-schema.org/draft/2020-12/schema', + description: 'Shared execution-oriented mothership stream contract from Go to Sim.', + oneOf: [ { - "$ref": "#/$defs/MothershipStreamV1SessionStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionChatEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionChatEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionTitleEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionTitleEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionTraceEventEnvelope" + $ref: 
'#/$defs/MothershipStreamV1SessionTraceEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1TextEventEnvelope" + $ref: '#/$defs/MothershipStreamV1TextEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolCallEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolCallEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolResultEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolResultEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope" + $ref: '#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope" + $ref: '#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1RunResumedEventEnvelope" + $ref: '#/$defs/MothershipStreamV1RunResumedEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompactionStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1CompactionStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompactionDoneEventEnvelope" + $ref: '#/$defs/MothershipStreamV1CompactionDoneEventEnvelope', }, { - "$ref": 
"#/$defs/MothershipStreamV1ErrorEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ErrorEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompleteEventEnvelope" - } + $ref: '#/$defs/MothershipStreamV1CompleteEventEnvelope', + }, ], - "title": "MothershipStreamV1EventEnvelope" + title: 'MothershipStreamV1EventEnvelope', } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts index 95d86b3823d..ef7f2e065fb 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts @@ -24,512 +24,534 @@ export type MothershipStreamV1EventEnvelope = | MothershipStreamV1CompactionStartEventEnvelope | MothershipStreamV1CompactionDoneEventEnvelope | MothershipStreamV1ErrorEventEnvelope - | MothershipStreamV1CompleteEventEnvelope; -export type MothershipStreamV1TextChannel = "assistant" | "thinking"; -export type MothershipStreamV1ToolExecutor = "go" | "sim" | "client"; -export type MothershipStreamV1ToolMode = "sync" | "async"; + | MothershipStreamV1CompleteEventEnvelope +export type MothershipStreamV1TextChannel = 'assistant' | 'thinking' +export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client' +export type MothershipStreamV1ToolMode = 'sync' | 'async' export type MothershipStreamV1ToolStatus = - | "generating" - | "executing" - | "success" - | "error" - | "cancelled" - | "skipped" - | "rejected"; -export type MothershipStreamV1CompletionStatus = "complete" | "error" | "cancelled"; + | 'generating' + | 'executing' + | 'success' + | 'error' + | 'cancelled' + | 'skipped' + | 'rejected' +export type MothershipStreamV1CompletionStatus = 'complete' | 'error' | 'cancelled' export interface MothershipStreamV1SessionStartEventEnvelope { - payload: MothershipStreamV1SessionStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: 
"session"; - v: 1; + payload: MothershipStreamV1SessionStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionStartPayload { - data?: MothershipStreamV1SessionStartData; - kind: "start"; + data?: MothershipStreamV1SessionStartData + kind: 'start' } export interface MothershipStreamV1SessionStartData { - responseId?: string; + responseId?: string } export interface MothershipStreamV1StreamScope { - agentId?: string; - lane: "subagent"; - parentToolCallId?: string; + agentId?: string + lane: 'subagent' + parentToolCallId?: string } export interface MothershipStreamV1StreamRef { - chatId?: string; - cursor?: string; - streamId: string; + chatId?: string + cursor?: string + streamId: string } export interface MothershipStreamV1Trace { /** * OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity. 
*/ - goTraceId?: string; - requestId: string; - spanId?: string; + goTraceId?: string + requestId: string + spanId?: string } export interface MothershipStreamV1SessionChatEventEnvelope { - payload: MothershipStreamV1SessionChatPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionChatPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionChatPayload { - chatId: string; - kind: "chat"; + chatId: string + kind: 'chat' } export interface MothershipStreamV1SessionTitleEventEnvelope { - payload: MothershipStreamV1SessionTitlePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionTitlePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionTitlePayload { - kind: "title"; - title: string; + kind: 'title' + title: string } export interface MothershipStreamV1SessionTraceEventEnvelope { - payload: MothershipStreamV1SessionTracePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionTracePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionTracePayload { - kind: "trace"; - requestId: string; - spanId?: string; + kind: 'trace' 
+ requestId: string + spanId?: string } export interface MothershipStreamV1TextEventEnvelope { - payload: MothershipStreamV1TextPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "text"; - v: 1; + payload: MothershipStreamV1TextPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'text' + v: 1 } export interface MothershipStreamV1TextPayload { - channel: MothershipStreamV1TextChannel; - text: string; + channel: MothershipStreamV1TextChannel + text: string } export interface MothershipStreamV1ToolCallEventEnvelope { - payload: MothershipStreamV1ToolCallDescriptor; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolCallDescriptor + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolCallDescriptor { - arguments?: MothershipStreamV1AdditionalPropertiesMap; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - partial?: boolean; - phase: "call"; - requiresConfirmation?: boolean; - status?: MothershipStreamV1ToolStatus; - toolCallId: string; - toolName: string; - ui?: MothershipStreamV1ToolUI; + arguments?: MothershipStreamV1AdditionalPropertiesMap + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + partial?: boolean + phase: 'call' + requiresConfirmation?: boolean + status?: MothershipStreamV1ToolStatus + toolCallId: string + toolName: string + ui?: MothershipStreamV1ToolUI } export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown; + [k: string]: unknown } export interface 
MothershipStreamV1ToolUI { - clientExecutable?: boolean; - hidden?: boolean; - icon?: string; - internal?: boolean; - phaseLabel?: string; - requiresConfirmation?: boolean; - title?: string; + clientExecutable?: boolean + hidden?: boolean + icon?: string + internal?: boolean + phaseLabel?: string + requiresConfirmation?: boolean + title?: string } export interface MothershipStreamV1ToolArgsDeltaEventEnvelope { - payload: MothershipStreamV1ToolArgsDeltaPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolArgsDeltaPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolArgsDeltaPayload { - argumentsDelta: string; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - phase: "args_delta"; - toolCallId: string; - toolName: string; + argumentsDelta: string + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + phase: 'args_delta' + toolCallId: string + toolName: string } export interface MothershipStreamV1ToolResultEventEnvelope { - payload: MothershipStreamV1ToolResultPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolResultPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolResultPayload { - error?: string; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - output?: unknown; - phase: "result"; - status?: MothershipStreamV1ToolStatus; - success: boolean; - toolCallId: 
string; - toolName: string; + error?: string + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + output?: unknown + phase: 'result' + status?: MothershipStreamV1ToolStatus + success: boolean + toolCallId: string + toolName: string } export interface MothershipStreamV1SubagentSpanStartEventEnvelope { - payload: MothershipStreamV1SubagentSpanStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentSpanStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentSpanStartPayload { - agent?: string; - data?: unknown; - event: "start"; - kind: "subagent"; + agent?: string + data?: unknown + event: 'start' + kind: 'subagent' } export interface MothershipStreamV1SubagentSpanEndEventEnvelope { - payload: MothershipStreamV1SubagentSpanEndPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentSpanEndPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentSpanEndPayload { - agent?: string; - data?: unknown; - event: "end"; - kind: "subagent"; + agent?: string + data?: unknown + event: 'end' + kind: 'subagent' } export interface MothershipStreamV1StructuredResultSpanEventEnvelope { - payload: MothershipStreamV1StructuredResultSpanPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: 
MothershipStreamV1StructuredResultSpanPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1StructuredResultSpanPayload { - agent?: string; - data?: unknown; - kind: "structured_result"; + agent?: string + data?: unknown + kind: 'structured_result' } export interface MothershipStreamV1SubagentResultSpanEventEnvelope { - payload: MothershipStreamV1SubagentResultSpanPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentResultSpanPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentResultSpanPayload { - agent?: string; - data?: unknown; - kind: "subagent_result"; + agent?: string + data?: unknown + kind: 'subagent_result' } export interface MothershipStreamV1ResourceUpsertEventEnvelope { - payload: MothershipStreamV1ResourceUpsertPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "resource"; - v: 1; + payload: MothershipStreamV1ResourceUpsertPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'resource' + v: 1 } export interface MothershipStreamV1ResourceUpsertPayload { - op: "upsert"; - resource: MothershipStreamV1ResourceDescriptor; + op: 'upsert' + resource: MothershipStreamV1ResourceDescriptor } export interface MothershipStreamV1ResourceDescriptor { - id: string; - title?: string; - type: string; + id: string + title?: string + type: string } export interface 
MothershipStreamV1ResourceRemoveEventEnvelope { - payload: MothershipStreamV1ResourceRemovePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "resource"; - v: 1; + payload: MothershipStreamV1ResourceRemovePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'resource' + v: 1 } export interface MothershipStreamV1ResourceRemovePayload { - op: "remove"; - resource: MothershipStreamV1ResourceDescriptor; + op: 'remove' + resource: MothershipStreamV1ResourceDescriptor } export interface MothershipStreamV1CheckpointPauseEventEnvelope { - payload: MothershipStreamV1CheckpointPausePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CheckpointPausePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CheckpointPausePayload { - checkpointId: string; - executionId: string; - frames?: MothershipStreamV1CheckpointPauseFrame[]; - kind: "checkpoint_pause"; - pendingToolCallIds: string[]; - runId: string; + checkpointId: string + executionId: string + frames?: MothershipStreamV1CheckpointPauseFrame[] + kind: 'checkpoint_pause' + pendingToolCallIds: string[] + runId: string } export interface MothershipStreamV1CheckpointPauseFrame { - parentToolCallId: string; - parentToolName: string; - pendingToolIds: string[]; + parentToolCallId: string + parentToolName: string + pendingToolIds: string[] } export interface MothershipStreamV1RunResumedEventEnvelope { - payload: MothershipStreamV1RunResumedPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: 
MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1RunResumedPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1RunResumedPayload { - kind: "resumed"; + kind: 'resumed' } export interface MothershipStreamV1CompactionStartEventEnvelope { - payload: MothershipStreamV1CompactionStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CompactionStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CompactionStartPayload { - kind: "compaction_start"; + kind: 'compaction_start' } export interface MothershipStreamV1CompactionDoneEventEnvelope { - payload: MothershipStreamV1CompactionDonePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CompactionDonePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CompactionDonePayload { - data?: MothershipStreamV1CompactionDoneData; - kind: "compaction_done"; + data?: MothershipStreamV1CompactionDoneData + kind: 'compaction_done' } export interface MothershipStreamV1CompactionDoneData { - summary_chars: number; + summary_chars: number } export interface MothershipStreamV1ErrorEventEnvelope { - payload: MothershipStreamV1ErrorPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: 
MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "error"; - v: 1; + payload: MothershipStreamV1ErrorPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'error' + v: 1 } export interface MothershipStreamV1ErrorPayload { - code?: string; - data?: unknown; - displayMessage?: string; - error?: string; - message: string; - provider?: string; + code?: string + data?: unknown + displayMessage?: string + error?: string + message: string + provider?: string } export interface MothershipStreamV1CompleteEventEnvelope { - payload: MothershipStreamV1CompletePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "complete"; - v: 1; + payload: MothershipStreamV1CompletePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'complete' + v: 1 } export interface MothershipStreamV1CompletePayload { - cost?: MothershipStreamV1CostData; - reason?: string; - response?: unknown; - status: MothershipStreamV1CompletionStatus; - usage?: MothershipStreamV1UsageData; + cost?: MothershipStreamV1CostData + reason?: string + response?: unknown + status: MothershipStreamV1CompletionStatus + usage?: MothershipStreamV1UsageData } export interface MothershipStreamV1CostData { - input?: number; - output?: number; - total?: number; + input?: number + output?: number + total?: number } export interface MothershipStreamV1UsageData { - cache_creation_input_tokens?: number; - cache_read_input_tokens?: number; - input_tokens?: number; - model?: string; - output_tokens?: number; - total_tokens?: number; + cache_creation_input_tokens?: number + cache_read_input_tokens?: number + input_tokens?: number + model?: string + output_tokens?: number + total_tokens?: number } 
-export type MothershipStreamV1AsyncToolRecordStatus = "pending" | "running" | "completed" | "failed" | "cancelled" | "delivered" +export type MothershipStreamV1AsyncToolRecordStatus = + | 'pending' + | 'running' + | 'completed' + | 'failed' + | 'cancelled' + | 'delivered' export const MothershipStreamV1AsyncToolRecordStatus = { - "pending": "pending", - "running": "running", - "completed": "completed", - "failed": "failed", - "cancelled": "cancelled", - "delivered": "delivered", -} as const; + pending: 'pending', + running: 'running', + completed: 'completed', + failed: 'failed', + cancelled: 'cancelled', + delivered: 'delivered', +} as const export const MothershipStreamV1CompletionStatus = { - "complete": "complete", - "error": "error", - "cancelled": "cancelled", -} as const; + complete: 'complete', + error: 'error', + cancelled: 'cancelled', +} as const -export type MothershipStreamV1EventType = "session" | "text" | "tool" | "span" | "resource" | "run" | "error" | "complete" +export type MothershipStreamV1EventType = + | 'session' + | 'text' + | 'tool' + | 'span' + | 'resource' + | 'run' + | 'error' + | 'complete' export const MothershipStreamV1EventType = { - "session": "session", - "text": "text", - "tool": "tool", - "span": "span", - "resource": "resource", - "run": "run", - "error": "error", - "complete": "complete", -} as const; + session: 'session', + text: 'text', + tool: 'tool', + span: 'span', + resource: 'resource', + run: 'run', + error: 'error', + complete: 'complete', +} as const -export type MothershipStreamV1ResourceOp = "upsert" | "remove" +export type MothershipStreamV1ResourceOp = 'upsert' | 'remove' export const MothershipStreamV1ResourceOp = { - "upsert": "upsert", - "remove": "remove", -} as const; + upsert: 'upsert', + remove: 'remove', +} as const -export type MothershipStreamV1RunKind = "checkpoint_pause" | "resumed" | "compaction_start" | "compaction_done" +export type MothershipStreamV1RunKind = + | 'checkpoint_pause' + | 'resumed' + 
| 'compaction_start' + | 'compaction_done' export const MothershipStreamV1RunKind = { - "checkpoint_pause": "checkpoint_pause", - "resumed": "resumed", - "compaction_start": "compaction_start", - "compaction_done": "compaction_done", -} as const; + checkpoint_pause: 'checkpoint_pause', + resumed: 'resumed', + compaction_start: 'compaction_start', + compaction_done: 'compaction_done', +} as const -export type MothershipStreamV1SessionKind = "trace" | "chat" | "title" | "start" +export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start' export const MothershipStreamV1SessionKind = { - "trace": "trace", - "chat": "chat", - "title": "title", - "start": "start", -} as const; + trace: 'trace', + chat: 'chat', + title: 'title', + start: 'start', +} as const -export type MothershipStreamV1SpanKind = "subagent" +export type MothershipStreamV1SpanKind = 'subagent' export const MothershipStreamV1SpanKind = { - "subagent": "subagent", -} as const; + subagent: 'subagent', +} as const -export type MothershipStreamV1SpanLifecycleEvent = "start" | "end" +export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end' export const MothershipStreamV1SpanLifecycleEvent = { - "start": "start", - "end": "end", -} as const; + start: 'start', + end: 'end', +} as const -export type MothershipStreamV1SpanPayloadKind = "subagent" | "structured_result" | "subagent_result" +export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result' export const MothershipStreamV1SpanPayloadKind = { - "subagent": "subagent", - "structured_result": "structured_result", - "subagent_result": "subagent_result", -} as const; + subagent: 'subagent', + structured_result: 'structured_result', + subagent_result: 'subagent_result', +} as const export const MothershipStreamV1TextChannel = { - "assistant": "assistant", - "thinking": "thinking", -} as const; + assistant: 'assistant', + thinking: 'thinking', +} as const export const 
MothershipStreamV1ToolExecutor = { - "go": "go", - "sim": "sim", - "client": "client", -} as const; + go: 'go', + sim: 'sim', + client: 'client', +} as const export const MothershipStreamV1ToolMode = { - "sync": "sync", - "async": "async", -} as const; + sync: 'sync', + async: 'async', +} as const -export type MothershipStreamV1ToolOutcome = "success" | "error" | "cancelled" | "skipped" | "rejected" +export type MothershipStreamV1ToolOutcome = + | 'success' + | 'error' + | 'cancelled' + | 'skipped' + | 'rejected' export const MothershipStreamV1ToolOutcome = { - "success": "success", - "error": "error", - "cancelled": "cancelled", - "skipped": "skipped", - "rejected": "rejected", -} as const; + success: 'success', + error: 'error', + cancelled: 'cancelled', + skipped: 'skipped', + rejected: 'rejected', +} as const -export type MothershipStreamV1ToolPhase = "call" | "args_delta" | "result" +export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result' export const MothershipStreamV1ToolPhase = { - "call": "call", - "args_delta": "args_delta", - "result": "result", -} as const; + call: 'call', + args_delta: 'args_delta', + result: 'result', +} as const export const MothershipStreamV1ToolStatus = { - "generating": "generating", - "executing": "executing", - "success": "success", - "error": "error", - "cancelled": "cancelled", - "skipped": "skipped", - "rejected": "rejected", -} as const; - + generating: 'generating', + executing: 'executing', + success: 'success', + error: 'error', + cancelled: 'cancelled', + skipped: 'skipped', + rejected: 'rejected', +} as const diff --git a/apps/sim/lib/copilot/generated/trace-spans-v1.ts b/apps/sim/lib/copilot/generated/trace-spans-v1.ts index c6c383beb9c..8785699f918 100644 --- a/apps/sim/lib/copilot/generated/trace-spans-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-spans-v1.ts @@ -9,99 +9,99 @@ // single source of truth and typos become compile errors. 
export const TraceSpan = { - AnthropicCountTokens: "anthropic.count_tokens", - AsyncToolStoreSet: "async_tool_store.set", - AuthValidateKey: "auth.validate_key", - ChatContinueWithToolResults: "chat.continue_with_tool_results", - ContextReduce: "context.reduce", - ContextSummarizeChunk: "context.summarize_chunk", - CopilotAnalyticsFlush: "copilot.analytics.flush", - CopilotAnalyticsSaveRequest: "copilot.analytics.save_request", - CopilotAnalyticsUpdateBilling: "copilot.analytics.update_billing", - CopilotAsyncRunsClaimCompleted: "copilot.async_runs.claim_completed", - CopilotAsyncRunsCreateRunCheckpoint: "copilot.async_runs.create_run_checkpoint", - CopilotAsyncRunsCreateRunSegment: "copilot.async_runs.create_run_segment", - CopilotAsyncRunsGetAsyncToolCall: "copilot.async_runs.get_async_tool_call", - CopilotAsyncRunsGetLatestForExecution: "copilot.async_runs.get_latest_for_execution", - CopilotAsyncRunsGetLatestForStream: "copilot.async_runs.get_latest_for_stream", - CopilotAsyncRunsGetMany: "copilot.async_runs.get_many", - CopilotAsyncRunsGetRunSegment: "copilot.async_runs.get_run_segment", - CopilotAsyncRunsListForRun: "copilot.async_runs.list_for_run", - CopilotAsyncRunsMarkAsyncToolStatus: "copilot.async_runs.mark_async_tool_status", - CopilotAsyncRunsReleaseClaim: "copilot.async_runs.release_claim", - CopilotAsyncRunsUpdateRunStatus: "copilot.async_runs.update_run_status", - CopilotAsyncRunsUpsertAsyncToolCall: "copilot.async_runs.upsert_async_tool_call", - CopilotAuthValidateApiKey: "copilot.auth.validate_api_key", - CopilotBillingUpdateCost: "copilot.billing.update_cost", - CopilotChatFinalizeAssistantTurn: "copilot.chat.finalize_assistant_turn", - CopilotChatPersistUserMessage: "copilot.chat.persist_user_message", - CopilotFinalizeStream: "copilot.finalize_stream", - CopilotRecoveryCheckReplayGap: "copilot.recovery.check_replay_gap", - CopilotResumeRequest: "copilot.resume.request", - CopilotSubagentExecute: "copilot.subagent.execute", - 
CopilotToolsHandleResourceSideEffects: "copilot.tools.handle_resource_side_effects", - CopilotToolsWriteCsvToTable: "copilot.tools.write_csv_to_table", - CopilotToolsWriteOutputFile: "copilot.tools.write_output_file", - CopilotToolsWriteOutputTable: "copilot.tools.write_output_table", - CopilotVfsPrepareImage: "copilot.vfs.prepare_image", - CopilotVfsReadFile: "copilot.vfs.read_file", - GenAiAgentExecute: "gen_ai.agent.execute", - LlmStream: "llm.stream", - ProviderRouterCountTokens: "provider.router.count_tokens", - ProviderRouterRoute: "provider.router.route", - SimUpdateCost: "sim.update_cost", - SimValidateApiKey: "sim.validate_api_key", - ToolAsyncWaiterWait: "tool.async_waiter.wait", - ToolExecute: "tool.execute", -} as const; + AnthropicCountTokens: 'anthropic.count_tokens', + AsyncToolStoreSet: 'async_tool_store.set', + AuthValidateKey: 'auth.validate_key', + ChatContinueWithToolResults: 'chat.continue_with_tool_results', + ContextReduce: 'context.reduce', + ContextSummarizeChunk: 'context.summarize_chunk', + CopilotAnalyticsFlush: 'copilot.analytics.flush', + CopilotAnalyticsSaveRequest: 'copilot.analytics.save_request', + CopilotAnalyticsUpdateBilling: 'copilot.analytics.update_billing', + CopilotAsyncRunsClaimCompleted: 'copilot.async_runs.claim_completed', + CopilotAsyncRunsCreateRunCheckpoint: 'copilot.async_runs.create_run_checkpoint', + CopilotAsyncRunsCreateRunSegment: 'copilot.async_runs.create_run_segment', + CopilotAsyncRunsGetAsyncToolCall: 'copilot.async_runs.get_async_tool_call', + CopilotAsyncRunsGetLatestForExecution: 'copilot.async_runs.get_latest_for_execution', + CopilotAsyncRunsGetLatestForStream: 'copilot.async_runs.get_latest_for_stream', + CopilotAsyncRunsGetMany: 'copilot.async_runs.get_many', + CopilotAsyncRunsGetRunSegment: 'copilot.async_runs.get_run_segment', + CopilotAsyncRunsListForRun: 'copilot.async_runs.list_for_run', + CopilotAsyncRunsMarkAsyncToolStatus: 'copilot.async_runs.mark_async_tool_status', + 
CopilotAsyncRunsReleaseClaim: 'copilot.async_runs.release_claim', + CopilotAsyncRunsUpdateRunStatus: 'copilot.async_runs.update_run_status', + CopilotAsyncRunsUpsertAsyncToolCall: 'copilot.async_runs.upsert_async_tool_call', + CopilotAuthValidateApiKey: 'copilot.auth.validate_api_key', + CopilotBillingUpdateCost: 'copilot.billing.update_cost', + CopilotChatFinalizeAssistantTurn: 'copilot.chat.finalize_assistant_turn', + CopilotChatPersistUserMessage: 'copilot.chat.persist_user_message', + CopilotFinalizeStream: 'copilot.finalize_stream', + CopilotRecoveryCheckReplayGap: 'copilot.recovery.check_replay_gap', + CopilotResumeRequest: 'copilot.resume.request', + CopilotSubagentExecute: 'copilot.subagent.execute', + CopilotToolsHandleResourceSideEffects: 'copilot.tools.handle_resource_side_effects', + CopilotToolsWriteCsvToTable: 'copilot.tools.write_csv_to_table', + CopilotToolsWriteOutputFile: 'copilot.tools.write_output_file', + CopilotToolsWriteOutputTable: 'copilot.tools.write_output_table', + CopilotVfsPrepareImage: 'copilot.vfs.prepare_image', + CopilotVfsReadFile: 'copilot.vfs.read_file', + GenAiAgentExecute: 'gen_ai.agent.execute', + LlmStream: 'llm.stream', + ProviderRouterCountTokens: 'provider.router.count_tokens', + ProviderRouterRoute: 'provider.router.route', + SimUpdateCost: 'sim.update_cost', + SimValidateApiKey: 'sim.validate_api_key', + ToolAsyncWaiterWait: 'tool.async_waiter.wait', + ToolExecute: 'tool.execute', +} as const -export type TraceSpanKey = keyof typeof TraceSpan; -export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey]; +export type TraceSpanKey = keyof typeof TraceSpan +export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey] /** Readonly sorted list of every canonical span name. 
*/ export const TraceSpanValues: readonly TraceSpanValue[] = [ - "anthropic.count_tokens", - "async_tool_store.set", - "auth.validate_key", - "chat.continue_with_tool_results", - "context.reduce", - "context.summarize_chunk", - "copilot.analytics.flush", - "copilot.analytics.save_request", - "copilot.analytics.update_billing", - "copilot.async_runs.claim_completed", - "copilot.async_runs.create_run_checkpoint", - "copilot.async_runs.create_run_segment", - "copilot.async_runs.get_async_tool_call", - "copilot.async_runs.get_latest_for_execution", - "copilot.async_runs.get_latest_for_stream", - "copilot.async_runs.get_many", - "copilot.async_runs.get_run_segment", - "copilot.async_runs.list_for_run", - "copilot.async_runs.mark_async_tool_status", - "copilot.async_runs.release_claim", - "copilot.async_runs.update_run_status", - "copilot.async_runs.upsert_async_tool_call", - "copilot.auth.validate_api_key", - "copilot.billing.update_cost", - "copilot.chat.finalize_assistant_turn", - "copilot.chat.persist_user_message", - "copilot.finalize_stream", - "copilot.recovery.check_replay_gap", - "copilot.resume.request", - "copilot.subagent.execute", - "copilot.tools.handle_resource_side_effects", - "copilot.tools.write_csv_to_table", - "copilot.tools.write_output_file", - "copilot.tools.write_output_table", - "copilot.vfs.prepare_image", - "copilot.vfs.read_file", - "gen_ai.agent.execute", - "llm.stream", - "provider.router.count_tokens", - "provider.router.route", - "sim.update_cost", - "sim.validate_api_key", - "tool.async_waiter.wait", - "tool.execute", -] as const; + 'anthropic.count_tokens', + 'async_tool_store.set', + 'auth.validate_key', + 'chat.continue_with_tool_results', + 'context.reduce', + 'context.summarize_chunk', + 'copilot.analytics.flush', + 'copilot.analytics.save_request', + 'copilot.analytics.update_billing', + 'copilot.async_runs.claim_completed', + 'copilot.async_runs.create_run_checkpoint', + 'copilot.async_runs.create_run_segment', + 
'copilot.async_runs.get_async_tool_call', + 'copilot.async_runs.get_latest_for_execution', + 'copilot.async_runs.get_latest_for_stream', + 'copilot.async_runs.get_many', + 'copilot.async_runs.get_run_segment', + 'copilot.async_runs.list_for_run', + 'copilot.async_runs.mark_async_tool_status', + 'copilot.async_runs.release_claim', + 'copilot.async_runs.update_run_status', + 'copilot.async_runs.upsert_async_tool_call', + 'copilot.auth.validate_api_key', + 'copilot.billing.update_cost', + 'copilot.chat.finalize_assistant_turn', + 'copilot.chat.persist_user_message', + 'copilot.finalize_stream', + 'copilot.recovery.check_replay_gap', + 'copilot.resume.request', + 'copilot.subagent.execute', + 'copilot.tools.handle_resource_side_effects', + 'copilot.tools.write_csv_to_table', + 'copilot.tools.write_output_file', + 'copilot.tools.write_output_table', + 'copilot.vfs.prepare_image', + 'copilot.vfs.read_file', + 'gen_ai.agent.execute', + 'llm.stream', + 'provider.router.count_tokens', + 'provider.router.route', + 'sim.update_cost', + 'sim.validate_api_key', + 'tool.async_waiter.wait', + 'tool.execute', +] as const diff --git a/apps/sim/lib/copilot/request/go/fetch.test.ts b/apps/sim/lib/copilot/request/go/fetch.test.ts index 446bed79673..9607a995d8e 100644 --- a/apps/sim/lib/copilot/request/go/fetch.test.ts +++ b/apps/sim/lib/copilot/request/go/fetch.test.ts @@ -1,91 +1,79 @@ -import { trace } from "@opentelemetry/api"; +import { trace } from '@opentelemetry/api' import { BasicTracerProvider, InMemorySpanExporter, SimpleSpanProcessor, -} from "@opentelemetry/sdk-trace-base"; -import { beforeEach, describe, expect, it, vi } from "vitest"; -import { fetchGo } from "@/lib/copilot/request/go/fetch"; +} from '@opentelemetry/sdk-trace-base' +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { fetchGo } from '@/lib/copilot/request/go/fetch' -describe("fetchGo", () => { - const exporter = new InMemorySpanExporter(); +describe('fetchGo', () => { + const exporter 
= new InMemorySpanExporter() const provider = new BasicTracerProvider({ spanProcessors: [new SimpleSpanProcessor(exporter)], - }); + }) beforeEach(() => { - exporter.reset(); - trace.setGlobalTracerProvider(provider); - vi.restoreAllMocks(); - }); + exporter.reset() + trace.setGlobalTracerProvider(provider) + vi.restoreAllMocks() + }) - it("emits a client span with http.* attrs and injects traceparent", async () => { - const fetchMock = vi - .fn() - .mockImplementation(async (_url: string, init: RequestInit) => { - const headers = init.headers as Record; - expect(headers.traceparent).toMatch( - /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, - ); - return new Response("ok", { - status: 200, - headers: { "content-length": "2" }, - }); - }); - vi.stubGlobal("fetch", fetchMock); + it('emits a client span with http.* attrs and injects traceparent', async () => { + const fetchMock = vi.fn().mockImplementation(async (_url: string, init: RequestInit) => { + const headers = init.headers as Record + expect(headers.traceparent).toMatch(/^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/) + return new Response('ok', { + status: 200, + headers: { 'content-length': '2' }, + }) + }) + vi.stubGlobal('fetch', fetchMock) - const res = await fetchGo("https://backend.example.com/api/copilot", { - method: "POST", - body: "payload", - operation: "stream", - attributes: { "copilot.leg": "sim_to_go" }, - }); - expect(res.status).toBe(200); + const res = await fetchGo('https://backend.example.com/api/copilot', { + method: 'POST', + body: 'payload', + operation: 'stream', + attributes: { 'copilot.leg': 'sim_to_go' }, + }) + expect(res.status).toBe(200) - const spans = exporter.getFinishedSpans(); - expect(spans).toHaveLength(1); - const attrs = spans[0].attributes; - expect(spans[0].name).toBe("sim → go /api/copilot"); - expect(attrs["http.method"]).toBe("POST"); - expect(attrs["http.url"]).toBe( - "https://backend.example.com/api/copilot", - ); - expect(attrs["http.target"]).toBe("/api/copilot"); - 
expect(attrs["http.status_code"]).toBe(200); - expect(attrs["copilot.operation"]).toBe("stream"); - expect(attrs["copilot.leg"]).toBe("sim_to_go"); - expect(typeof attrs["http.response.headers_ms"]).toBe("number"); - }); + const spans = exporter.getFinishedSpans() + expect(spans).toHaveLength(1) + const attrs = spans[0].attributes + expect(spans[0].name).toBe('sim → go /api/copilot') + expect(attrs['http.method']).toBe('POST') + expect(attrs['http.url']).toBe('https://backend.example.com/api/copilot') + expect(attrs['http.target']).toBe('/api/copilot') + expect(attrs['http.status_code']).toBe(200) + expect(attrs['copilot.operation']).toBe('stream') + expect(attrs['copilot.leg']).toBe('sim_to_go') + expect(typeof attrs['http.response.headers_ms']).toBe('number') + }) - it("marks span as error on non-2xx response", async () => { - vi.stubGlobal( - "fetch", - vi.fn().mockResolvedValue(new Response("nope", { status: 500 })), - ); + it('marks span as error on non-2xx response', async () => { + vi.stubGlobal('fetch', vi.fn().mockResolvedValue(new Response('nope', { status: 500 }))) - const res = await fetchGo("https://backend.example.com/api/tools/resume", { - method: "POST", - }); - expect(res.status).toBe(500); + const res = await fetchGo('https://backend.example.com/api/tools/resume', { + method: 'POST', + }) + expect(res.status).toBe(500) - const spans = exporter.getFinishedSpans(); - expect(spans).toHaveLength(1); - expect(spans[0].status.code).toBe(2); - }); + const spans = exporter.getFinishedSpans() + expect(spans).toHaveLength(1) + expect(spans[0].status.code).toBe(2) + }) - it("records exceptions when fetch throws", async () => { - vi.stubGlobal( - "fetch", - vi.fn().mockRejectedValue(new Error("network boom")), - ); + it('records exceptions when fetch throws', async () => { + vi.stubGlobal('fetch', vi.fn().mockRejectedValue(new Error('network boom'))) await expect( - fetchGo("https://backend.example.com/api/traces", { method: "POST" }), - 
).rejects.toThrow("network boom"); + fetchGo('https://backend.example.com/api/traces', { method: 'POST' }) + ).rejects.toThrow('network boom') - const spans = exporter.getFinishedSpans(); - expect(spans).toHaveLength(1); - expect(spans[0].status.code).toBe(2); - expect(spans[0].events.some((e) => e.name === "exception")).toBe(true); - }); -}); + const spans = exporter.getFinishedSpans() + expect(spans).toHaveLength(1) + expect(spans[0].status.code).toBe(2) + expect(spans[0].events.some((e) => e.name === 'exception')).toBe(true) + }) +}) diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index 6560c84f272..f6fff746158 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -1,22 +1,17 @@ -import { - context, - SpanStatusCode, - trace, - type Context, -} from "@opentelemetry/api"; -import { traceHeaders } from "@/lib/copilot/request/go/propagation"; +import { type Context, context, SpanStatusCode, trace } from '@opentelemetry/api' +import { traceHeaders } from '@/lib/copilot/request/go/propagation' // Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated // before `instrumentation-node.ts` installs the TracerProvider under // Next.js 16 + Turbopack dev, freezing a NoOp tracer and silently dropping // every outbound Sim → Go span. Resolving per-call avoids the race. -const getTracer = () => trace.getTracer("sim-copilot-http", "1.0.0"); +const getTracer = () => trace.getTracer('sim-copilot-http', '1.0.0') export interface OutboundFetchOptions extends RequestInit { - otelContext?: Context; - spanName?: string; - operation?: string; - attributes?: Record; + otelContext?: Context + spanName?: string + operation?: string + attributes?: Record } /** @@ -28,10 +23,7 @@ export interface OutboundFetchOptions extends RequestInit { * size, error code) so any future latency investigation — not just images or * Bedrock — has uniform metadata to work with. 
*/ -export async function fetchGo( - url: string, - options: OutboundFetchOptions = {}, -): Promise { +export async function fetchGo(url: string, options: OutboundFetchOptions = {}): Promise { const { otelContext, spanName, @@ -39,83 +31,78 @@ export async function fetchGo( attributes, headers: providedHeaders, ...init - } = options; + } = options - const parsed = safeParseUrl(url); - const pathname = parsed?.pathname ?? url; - const method = (init.method ?? "GET").toUpperCase(); - const parentContext = otelContext ?? context.active(); + const parsed = safeParseUrl(url) + const pathname = parsed?.pathname ?? url + const method = (init.method ?? 'GET').toUpperCase() + const parentContext = otelContext ?? context.active() const span = getTracer().startSpan( spanName ?? `sim → go ${pathname}`, { attributes: { - "http.method": method, - "http.url": url, - "http.target": pathname, - "net.peer.name": parsed?.host ?? "", - "copilot.leg": "sim_to_go", - ...(operation ? { "copilot.operation": operation } : {}), + 'http.method': method, + 'http.url': url, + 'http.target': pathname, + 'net.peer.name': parsed?.host ?? '', + 'copilot.leg': 'sim_to_go', + ...(operation ? { 'copilot.operation': operation } : {}), ...(attributes ?? {}), }, }, - parentContext, - ); + parentContext + ) - const activeContext = trace.setSpan(parentContext, span); - const propagatedHeaders = traceHeaders({}, activeContext); + const activeContext = trace.setSpan(parentContext, span) + const propagatedHeaders = traceHeaders({}, activeContext) const mergedHeaders = { ...(providedHeaders as Record | undefined), ...propagatedHeaders, - }; + } - const start = performance.now(); + const start = performance.now() try { const response = await context.with(activeContext, () => fetch(url, { ...init, method, headers: mergedHeaders, - }), - ); - const elapsedMs = performance.now() - start; - const contentLength = Number(response.headers.get("content-length") ?? 
0); - span.setAttribute("http.status_code", response.status); - span.setAttribute("http.response.headers_ms", Math.round(elapsedMs)); + }) + ) + const elapsedMs = performance.now() - start + const contentLength = Number(response.headers.get('content-length') ?? 0) + span.setAttribute('http.status_code', response.status) + span.setAttribute('http.response.headers_ms', Math.round(elapsedMs)) if (contentLength > 0) { - span.setAttribute("http.response.content_length", contentLength); + span.setAttribute('http.response.content_length', contentLength) } if (response.status >= 400) { span.setStatus({ code: SpanStatusCode.ERROR, message: `HTTP ${response.status}`, - }); + }) } else { - span.setStatus({ code: SpanStatusCode.OK }); + span.setStatus({ code: SpanStatusCode.OK }) } - return response; + return response } catch (error) { - span.setAttribute( - "http.response.headers_ms", - Math.round(performance.now() - start), - ); + span.setAttribute('http.response.headers_ms', Math.round(performance.now() - start)) span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - throw error; + }) + span.recordException(error instanceof Error ? 
error : new Error(String(error))) + throw error } finally { - span.end(); + span.end() } } function safeParseUrl(url: string): URL | null { try { - return new URL(url); + return new URL(url) } catch { - return null; + return null } } diff --git a/apps/sim/lib/copilot/request/go/propagation.ts b/apps/sim/lib/copilot/request/go/propagation.ts index 0a30011a483..51ec28566cc 100644 --- a/apps/sim/lib/copilot/request/go/propagation.ts +++ b/apps/sim/lib/copilot/request/go/propagation.ts @@ -1,25 +1,25 @@ -import { context, type Context } from "@opentelemetry/api"; -import { W3CTraceContextPropagator } from "@opentelemetry/core"; +import { type Context, context } from '@opentelemetry/api' +import { W3CTraceContextPropagator } from '@opentelemetry/core' -const propagator = new W3CTraceContextPropagator(); +const propagator = new W3CTraceContextPropagator() const headerSetter = { set(carrier: Record, key: string, value: string) { - carrier[key] = value; + carrier[key] = value }, -}; +} const headerGetter = { keys(carrier: Headers): string[] { - const out: string[] = []; + const out: string[] = [] carrier.forEach((_, key) => { - out.push(key); - }); - return out; + out.push(key) + }) + return out }, get(carrier: Headers, key: string): string | undefined { - return carrier.get(key) ?? undefined; + return carrier.get(key) ?? undefined }, -}; +} /** * Injects W3C trace context (traceparent, tracestate) into outbound HTTP @@ -31,11 +31,11 @@ const headerGetter = { */ export function traceHeaders( carrier?: Record, - otelContext?: Context, + otelContext?: Context ): Record { - const headers: Record = carrier ?? {}; - propagator.inject(otelContext ?? context.active(), headers, headerSetter); - return headers; + const headers: Record = carrier ?? {} + propagator.inject(otelContext ?? context.active(), headers, headerSetter) + return headers } /** @@ -53,5 +53,5 @@ export function traceHeaders( * behavior as before this helper existed. 
*/ export function contextFromRequestHeaders(headers: Headers): Context { - return propagator.extract(context.active(), headers, headerGetter); + return propagator.extract(context.active(), headers, headerGetter) } diff --git a/apps/sim/lib/copilot/request/go/stream.test.ts b/apps/sim/lib/copilot/request/go/stream.test.ts index 1ea855d47da..f9f80384c8d 100644 --- a/apps/sim/lib/copilot/request/go/stream.test.ts +++ b/apps/sim/lib/copilot/request/go/stream.test.ts @@ -1,7 +1,7 @@ /** * @vitest-environment node */ -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, @@ -9,65 +9,57 @@ import { MothershipStreamV1ToolMode, MothershipStreamV1ToolOutcome, MothershipStreamV1ToolPhase, -} from "@/lib/copilot/generated/mothership-stream-v1"; +} from '@/lib/copilot/generated/mothership-stream-v1' import { buildPreviewContentUpdate, decodeJsonStringPrefix, extractEditContent, runStreamLoop, -} from "@/lib/copilot/request/go/stream"; -import { createEvent } from "@/lib/copilot/request/session"; -import { - RequestTraceV1Outcome, - TraceCollector, -} from "@/lib/copilot/request/trace"; -import type { - ExecutionContext, - StreamingContext, -} from "@/lib/copilot/request/types"; +} from '@/lib/copilot/request/go/stream' +import { createEvent } from '@/lib/copilot/request/session' +import { RequestTraceV1Outcome, TraceCollector } from '@/lib/copilot/request/trace' +import type { ExecutionContext, StreamingContext } from '@/lib/copilot/request/types' function createSseResponse(events: unknown[]): Response { - const payload = events - .map((event) => `data: ${JSON.stringify(event)}\n\n`) - .join(""); + const payload = events.map((event) => `data: ${JSON.stringify(event)}\n\n`).join('') return new Response( new ReadableStream({ start(controller) { - controller.enqueue(new TextEncoder().encode(payload)); - 
controller.close(); + controller.enqueue(new TextEncoder().encode(payload)) + controller.close() }, }), { status: 200, headers: { - "Content-Type": "text/event-stream", + 'Content-Type': 'text/event-stream', }, - }, - ); + } + ) } function createRawSseResponse(payload: string): Response { return new Response( new ReadableStream({ start(controller) { - controller.enqueue(new TextEncoder().encode(payload)); - controller.close(); + controller.enqueue(new TextEncoder().encode(payload)) + controller.close() }, }), { status: 200, headers: { - "Content-Type": "text/event-stream", + 'Content-Type': 'text/event-stream', }, - }, - ); + } + ) } function createStreamingContext(): StreamingContext { return { - messageId: "msg-1", - accumulatedContent: "", + messageId: 'msg-1', + accumulatedContent: '', contentBlocks: [], toolCalls: new Map(), pendingToolPromises: new Map(), @@ -78,310 +70,266 @@ function createStreamingContext(): StreamingContext { subAgentParentStack: [], subAgentContent: {}, subAgentToolCalls: {}, - pendingContent: "", + pendingContent: '', streamComplete: false, wasAborted: false, errors: [], activeFileIntent: null, trace: new TraceCollector(), - }; + } } -describe("copilot go stream helpers", () => { +describe('copilot go stream helpers', () => { beforeEach(() => { - vi.stubGlobal("fetch", vi.fn()); - }); + vi.stubGlobal('fetch', vi.fn()) + }) afterEach(() => { - vi.unstubAllGlobals(); - }); + vi.unstubAllGlobals() + }) - it("decodes complete escapes and stops at incomplete unicode escapes", () => { - expect(decodeJsonStringPrefix("hello\\nworld")).toBe("hello\nworld"); - expect(decodeJsonStringPrefix("emoji \\u263A")).toBe("emoji ☺"); - expect(decodeJsonStringPrefix("partial \\u26")).toBe("partial "); - }); + it('decodes complete escapes and stops at incomplete unicode escapes', () => { + expect(decodeJsonStringPrefix('hello\\nworld')).toBe('hello\nworld') + expect(decodeJsonStringPrefix('emoji \\u263A')).toBe('emoji ☺') + 
expect(decodeJsonStringPrefix('partial \\u26')).toBe('partial ') + }) - it("extracts the streamed edit_content prefix from partial JSON", () => { - expect(extractEditContent('{"content":"hello\\nwor')).toBe("hello\nwor"); - expect(extractEditContent('{"content":"tab\\tvalue"}')).toBe("tab\tvalue"); - }); + it('extracts the streamed edit_content prefix from partial JSON', () => { + expect(extractEditContent('{"content":"hello\\nwor')).toBe('hello\nwor') + expect(extractEditContent('{"content":"tab\\tvalue"}')).toBe('tab\tvalue') + }) - it("emits full snapshots for append (sidebar viewer uses replace mode; no delta merge)", () => { - expect( - buildPreviewContentUpdate("hello", "hello world", 100, 200, "append"), - ).toEqual({ - content: "hello world", - contentMode: "snapshot", + it('emits full snapshots for append (sidebar viewer uses replace mode; no delta merge)', () => { + expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'append')).toEqual({ + content: 'hello world', + contentMode: 'snapshot', lastSnapshotAt: 200, - }); - }); + }) + }) - it("emits deltas for update when the preview extends the previous text", () => { - expect( - buildPreviewContentUpdate("hello", "hello world", 100, 200, "update"), - ).toEqual({ - content: " world", - contentMode: "delta", + it('emits deltas for update when the preview extends the previous text', () => { + expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'update')).toEqual({ + content: ' world', + contentMode: 'delta', lastSnapshotAt: 100, - }); - }); + }) + }) - it("falls back to snapshots for patches and divergent content", () => { - expect( - buildPreviewContentUpdate("hello", "goodbye", 100, 200, "update"), - ).toEqual({ - content: "goodbye", - contentMode: "snapshot", + it('falls back to snapshots for patches and divergent content', () => { + expect(buildPreviewContentUpdate('hello', 'goodbye', 100, 200, 'update')).toEqual({ + content: 'goodbye', + contentMode: 'snapshot', lastSnapshotAt: 
200, - }); + }) - expect( - buildPreviewContentUpdate("hello", "hello world", 100, 200, "patch"), - ).toEqual({ - content: "hello world", - contentMode: "snapshot", + expect(buildPreviewContentUpdate('hello', 'hello world', 100, 200, 'patch')).toEqual({ + content: 'hello world', + contentMode: 'snapshot', lastSnapshotAt: 200, - }); - }); + }) + }) - it("drops duplicate tool_result events before forwarding them", async () => { + it('drops duplicate tool_result events before forwarding them', async () => { const toolResult = createEvent({ - streamId: "stream-1", - cursor: "1", + streamId: 'stream-1', + cursor: '1', seq: 1, - requestId: "req-1", + requestId: 'req-1', type: MothershipStreamV1EventType.tool, payload: { - toolCallId: "tool-result-dedupe", - toolName: "search_online", + toolCallId: 'tool-result-dedupe', + toolName: 'search_online', executor: MothershipStreamV1ToolExecutor.sim, mode: MothershipStreamV1ToolMode.async, phase: MothershipStreamV1ToolPhase.result, success: true, - output: { value: "ok" }, + output: { value: 'ok' }, }, - }); + }) const complete = createEvent({ - streamId: "stream-1", - cursor: "2", + streamId: 'stream-1', + cursor: '2', seq: 2, - requestId: "req-1", + requestId: 'req-1', type: MothershipStreamV1EventType.complete, payload: { status: MothershipStreamV1CompletionStatus.complete, }, - }); + }) - vi.mocked(fetch).mockResolvedValueOnce( - createSseResponse([toolResult, toolResult, complete]), - ); + vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([toolResult, toolResult, complete])) - const onEvent = vi.fn(); - const context = createStreamingContext(); + const onEvent = vi.fn() + const context = createStreamingContext() const execContext: ExecutionContext = { - userId: "user-1", - workflowId: "workflow-1", - }; + userId: 'user-1', + workflowId: 'workflow-1', + } - await runStreamLoop( - "https://example.com/mothership/stream", - {}, - context, - execContext, - { - onEvent, - timeout: 1000, - }, - ); + await 
runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + onEvent, + timeout: 1000, + }) expect(onEvent.mock.calls.map(([event]) => event.type)).toEqual([ MothershipStreamV1EventType.tool, MothershipStreamV1EventType.complete, - ]); + ]) expect(onEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.tool, payload: expect.objectContaining({ - toolCallId: "tool-result-dedupe", + toolCallId: 'tool-result-dedupe', phase: MothershipStreamV1ToolPhase.result, }), - }), - ); - expect(context.toolCalls.get("tool-result-dedupe")).toEqual( + }) + ) + expect(context.toolCalls.get('tool-result-dedupe')).toEqual( expect.objectContaining({ - id: "tool-result-dedupe", - name: "search_online", + id: 'tool-result-dedupe', + name: 'search_online', status: MothershipStreamV1ToolOutcome.success, - result: { success: true, output: { value: "ok" } }, - }), - ); - }); + result: { success: true, output: { value: 'ok' } }, + }) + ) + }) - it("fails closed when the shared stream ends before a terminal event", async () => { + it('fails closed when the shared stream ends before a terminal event', async () => { const textEvent = createEvent({ - streamId: "stream-1", - cursor: "1", + streamId: 'stream-1', + cursor: '1', seq: 1, - requestId: "req-1", + requestId: 'req-1', type: MothershipStreamV1EventType.text, payload: { - channel: "assistant", - text: "partial response", + channel: 'assistant', + text: 'partial response', }, - }); + }) - vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([textEvent])); + vi.mocked(fetch).mockResolvedValueOnce(createSseResponse([textEvent])) - const context = createStreamingContext(); + const context = createStreamingContext() const execContext: ExecutionContext = { - userId: "user-1", - workflowId: "workflow-1", - }; + userId: 'user-1', + workflowId: 'workflow-1', + } await expect( - runStreamLoop( - "https://example.com/mothership/stream", - {}, - context, - execContext, - { - timeout: 1000, - 
}, - ), - ).rejects.toThrow("Copilot backend stream ended before a terminal event"); + runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + timeout: 1000, + }) + ).rejects.toThrow('Copilot backend stream ended before a terminal event') expect( context.errors.some((message) => - message.includes( - "Copilot backend stream ended before a terminal event", - ), - ), - ).toBe(true); - }); + message.includes('Copilot backend stream ended before a terminal event') + ) + ).toBe(true) + }) - it("fails closed when the shared stream receives an invalid event", async () => { + it('fails closed when the shared stream receives an invalid event', async () => { vi.mocked(fetch).mockResolvedValueOnce( createSseResponse([ { v: 1, type: MothershipStreamV1EventType.tool, seq: 1, - ts: "2026-01-01T00:00:00.000Z", - stream: { streamId: "stream-1", cursor: "1" }, + ts: '2026-01-01T00:00:00.000Z', + stream: { streamId: 'stream-1', cursor: '1' }, payload: { phase: MothershipStreamV1ToolPhase.result, }, }, - ]), - ); + ]) + ) - const context = createStreamingContext(); + const context = createStreamingContext() const execContext: ExecutionContext = { - userId: "user-1", - workflowId: "workflow-1", - }; + userId: 'user-1', + workflowId: 'workflow-1', + } await expect( - runStreamLoop( - "https://example.com/mothership/stream", - {}, - context, - execContext, - { - timeout: 1000, - }, - ), - ).rejects.toThrow("Received invalid stream event on shared path"); + runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + timeout: 1000, + }) + ).rejects.toThrow('Received invalid stream event on shared path') expect( context.errors.some((message) => - message.includes("Received invalid stream event on shared path"), - ), - ).toBe(true); - }); + message.includes('Received invalid stream event on shared path') + ) + ).toBe(true) + }) - it("fails closed when the shared stream receives malformed JSON", async () => { + it('fails closed when the 
shared stream receives malformed JSON', async () => { vi.mocked(fetch).mockResolvedValueOnce( - createRawSseResponse('data: {"v":1,"type":"text","payload":\n\n'), - ); + createRawSseResponse('data: {"v":1,"type":"text","payload":\n\n') + ) - const context = createStreamingContext(); + const context = createStreamingContext() const execContext: ExecutionContext = { - userId: "user-1", - workflowId: "workflow-1", - }; + userId: 'user-1', + workflowId: 'workflow-1', + } await expect( - runStreamLoop( - "https://example.com/mothership/stream", - {}, - context, - execContext, - { - timeout: 1000, - }, - ), - ).rejects.toThrow("Failed to parse SSE event JSON"); + runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + timeout: 1000, + }) + ).rejects.toThrow('Failed to parse SSE event JSON') expect( - context.errors.some((message) => - message.includes("Failed to parse SSE event JSON"), - ), - ).toBe(true); - }); + context.errors.some((message) => message.includes('Failed to parse SSE event JSON')) + ).toBe(true) + }) - it("records a split canonical request id and go trace id from the stream envelope", async () => { + it('records a split canonical request id and go trace id from the stream envelope', async () => { vi.mocked(fetch).mockResolvedValueOnce( createSseResponse([ { v: 1, type: MothershipStreamV1EventType.text, seq: 1, - ts: "2026-01-01T00:00:00.000Z", - stream: { streamId: "stream-1", cursor: "1" }, + ts: '2026-01-01T00:00:00.000Z', + stream: { streamId: 'stream-1', cursor: '1' }, trace: { - requestId: "sim-request-1", - goTraceId: "go-trace-1", + requestId: 'sim-request-1', + goTraceId: 'go-trace-1', }, payload: { - channel: "assistant", - text: "hello", + channel: 'assistant', + text: 'hello', }, }, createEvent({ - streamId: "stream-1", - cursor: "2", + streamId: 'stream-1', + cursor: '2', seq: 2, - requestId: "sim-request-1", + requestId: 'sim-request-1', type: MothershipStreamV1EventType.complete, payload: { status: 
MothershipStreamV1CompletionStatus.complete, }, }), - ]), - ); + ]) + ) - const context = createStreamingContext(); - context.requestId = "sim-request-1"; + const context = createStreamingContext() + context.requestId = 'sim-request-1' const execContext: ExecutionContext = { - userId: "user-1", - workflowId: "workflow-1", - }; + userId: 'user-1', + workflowId: 'workflow-1', + } - await runStreamLoop( - "https://example.com/mothership/stream", - {}, - context, - execContext, - { - timeout: 1000, - }, - ); + await runStreamLoop('https://example.com/mothership/stream', {}, context, execContext, { + timeout: 1000, + }) - expect(context.requestId).toBe("sim-request-1"); + expect(context.requestId).toBe('sim-request-1') expect( context.trace.build({ outcome: RequestTraceV1Outcome.success, - simRequestId: "sim-request-1", - }).goTraceId, - ).toBe("go-trace-1"); - }); -}); + simRequestId: 'sim-request-1', + }).goTraceId + ).toBe('go-trace-1') + }) +}) diff --git a/apps/sim/lib/copilot/request/go/stream.ts b/apps/sim/lib/copilot/request/go/stream.ts index 1844f08c8f9..95d80f50eca 100644 --- a/apps/sim/lib/copilot/request/go/stream.ts +++ b/apps/sim/lib/copilot/request/go/stream.ts @@ -1,93 +1,82 @@ -import type { Context } from "@opentelemetry/api"; -import { createLogger } from "@sim/logger"; -import { ORCHESTRATION_TIMEOUT_MS } from "@/lib/copilot/constants"; -import { fetchGo } from "@/lib/copilot/request/go/fetch"; -import { MothershipStreamV1SpanLifecycleEvent } from "@/lib/copilot/generated/mothership-stream-v1"; +import type { Context } from '@opentelemetry/api' +import { createLogger } from '@sim/logger' +import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants' +import { MothershipStreamV1SpanLifecycleEvent } from '@/lib/copilot/generated/mothership-stream-v1' +import { fetchGo } from '@/lib/copilot/request/go/fetch' import { buildPreviewContentUpdate, createFilePreviewAdapterState, decodeJsonStringPrefix, extractEditContent, processFilePreviewStreamEvent, 
-} from "@/lib/copilot/request/go/file-preview-adapter"; -import { - FatalSseEventError, - processSSEStream, -} from "@/lib/copilot/request/go/parser"; +} from '@/lib/copilot/request/go/file-preview-adapter' +import { FatalSseEventError, processSSEStream } from '@/lib/copilot/request/go/parser' import { handleSubagentRouting, sseHandlers, subAgentHandlers, -} from "@/lib/copilot/request/handlers"; +} from '@/lib/copilot/request/handlers' import { eventToStreamEvent, isSubagentSpanStreamEvent, parsePersistedStreamEventEnvelope, -} from "@/lib/copilot/request/session"; -import { - shouldSkipToolCallEvent, - shouldSkipToolResultEvent, -} from "@/lib/copilot/request/sse-utils"; +} from '@/lib/copilot/request/session' +import { shouldSkipToolCallEvent, shouldSkipToolResultEvent } from '@/lib/copilot/request/sse-utils' import type { ExecutionContext, OrchestratorOptions, StreamEvent, StreamingContext, -} from "@/lib/copilot/request/types"; +} from '@/lib/copilot/request/types' -const logger = createLogger("CopilotGoStream"); +const logger = createLogger('CopilotGoStream') -export { - buildPreviewContentUpdate, - decodeJsonStringPrefix, - extractEditContent, -}; +export { buildPreviewContentUpdate, decodeJsonStringPrefix, extractEditContent } -type JsonRecord = Record; +type JsonRecord = Record type SubagentSpanData = { - pending?: boolean; - toolCallId?: string; -}; + pending?: boolean + toolCallId?: string +} function asJsonRecord(value: unknown): JsonRecord | undefined { - return value && typeof value === "object" && !Array.isArray(value) + return value && typeof value === 'object' && !Array.isArray(value) ? (value as JsonRecord) - : undefined; + : undefined } function parseSubagentSpanData(value: unknown): SubagentSpanData | undefined { - const data = asJsonRecord(value); + const data = asJsonRecord(value) if (!data) { - return undefined; + return undefined } - const toolCallId = - typeof data.tool_call_id === "string" ? 
data.tool_call_id : undefined; - const pending = typeof data.pending === "boolean" ? data.pending : undefined; + const toolCallId = typeof data.tool_call_id === 'string' ? data.tool_call_id : undefined + const pending = typeof data.pending === 'boolean' ? data.pending : undefined return { ...(toolCallId ? { toolCallId } : {}), ...(pending !== undefined ? { pending } : {}), - }; + } } export class CopilotBackendError extends Error { - status?: number; - body?: string; + status?: number + body?: string constructor(message: string, options?: { status?: number; body?: string }) { - super(message); - this.name = "CopilotBackendError"; - this.status = options?.status; - this.body = options?.body; + super(message) + this.name = 'CopilotBackendError' + this.status = options?.status + this.body = options?.body } } export class BillingLimitError extends Error { constructor(public readonly userId: string) { - super("Usage limit reached"); - this.name = "BillingLimitError"; + super('Usage limit reached') + this.name = 'BillingLimitError' } } @@ -99,15 +88,12 @@ export interface StreamLoopOptions extends OrchestratorOptions { * Called for each normalized event BEFORE standard handler dispatch. * Return true to skip the default handler for this event. */ - onBeforeDispatch?: ( - event: StreamEvent, - context: StreamingContext, - ) => boolean | undefined; + onBeforeDispatch?: (event: StreamEvent, context: StreamingContext) => boolean | undefined /** * Called when the Go backend's trace ID (go_trace_id) is first received via SSE. 
*/ - onGoTraceId?: (goTraceId: string) => void; - otelContext?: Context; + onGoTraceId?: (goTraceId: string) => void + otelContext?: Context } /** @@ -122,132 +108,119 @@ export async function runStreamLoop( fetchOptions: RequestInit, context: StreamingContext, execContext: ExecutionContext, - options: StreamLoopOptions, + options: StreamLoopOptions ): Promise { - const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options; - const filePreviewAdapterState = createFilePreviewAdapterState(); - - const pathname = new URL(fetchUrl).pathname; - const requestBodyBytes = estimateBodyBytes(fetchOptions.body); - const fetchSpan = context.trace.startSpan( - `HTTP Request → ${pathname}`, - "sim.http.fetch", - { - url: fetchUrl, - method: fetchOptions.method ?? "GET", - requestBodyBytes, - }, - ); - const fetchStart = performance.now(); + const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options + const filePreviewAdapterState = createFilePreviewAdapterState() + + const pathname = new URL(fetchUrl).pathname + const requestBodyBytes = estimateBodyBytes(fetchOptions.body) + const fetchSpan = context.trace.startSpan(`HTTP Request → ${pathname}`, 'sim.http.fetch', { + url: fetchUrl, + method: fetchOptions.method ?? 'GET', + requestBodyBytes, + }) + const fetchStart = performance.now() const response = await fetchGo(fetchUrl, { ...fetchOptions, signal: abortSignal, otelContext: options.otelContext, spanName: `sim → go ${pathname}`, - operation: "stream", + operation: 'stream', attributes: { - "copilot.stream": true, - ...(requestBodyBytes - ? { "http.request.content_length": requestBodyBytes } - : {}), + 'copilot.stream': true, + ...(requestBodyBytes ? { 'http.request.content_length': requestBodyBytes } : {}), }, - }); - const headersElapsedMs = Math.round(performance.now() - fetchStart); + }) + const headersElapsedMs = Math.round(performance.now() - fetchStart) fetchSpan.attributes = { ...(fetchSpan.attributes ?? 
{}), status: response.status, headersMs: headersElapsedMs, - }; + } if (!response.ok) { - context.trace.endSpan(fetchSpan, "error"); - const errorText = await response.text().catch(() => ""); + context.trace.endSpan(fetchSpan, 'error') + const errorText = await response.text().catch(() => '') if (response.status === 402) { - throw new BillingLimitError(execContext.userId); + throw new BillingLimitError(execContext.userId) } throw new CopilotBackendError( `Copilot backend error (${response.status}): ${errorText || response.statusText}`, - { status: response.status, body: errorText || response.statusText }, - ); + { status: response.status, body: errorText || response.statusText } + ) } if (!response.body) { - context.trace.endSpan(fetchSpan, "error"); - throw new CopilotBackendError("Copilot backend response missing body"); + context.trace.endSpan(fetchSpan, 'error') + throw new CopilotBackendError('Copilot backend response missing body') } - context.trace.endSpan(fetchSpan); + context.trace.endSpan(fetchSpan) - const bodySpan = context.trace.startSpan( - `SSE Body → ${pathname}`, - "sim.http.stream_body", - { - url: fetchUrl, - method: fetchOptions.method ?? "GET", - }, - ); - const bodyStart = performance.now(); - let firstEventMs: number | undefined; - let eventsReceived = 0; - let bytesReceived = 0; - let endedOn: string = "terminal"; + const bodySpan = context.trace.startSpan(`SSE Body → ${pathname}`, 'sim.http.stream_body', { + url: fetchUrl, + method: fetchOptions.method ?? 
'GET', + }) + const bodyStart = performance.now() + let firstEventMs: number | undefined + let eventsReceived = 0 + let bytesReceived = 0 + let endedOn = 'terminal' - const reader = response.body.getReader(); - const decoder = new TextDecoder(); + const reader = response.body.getReader() + const decoder = new TextDecoder() const timeoutId = setTimeout(() => { - context.errors.push("Request timed out"); - context.streamComplete = true; - endedOn = "timeout"; - reader.cancel().catch(() => {}); - }, timeout); + context.errors.push('Request timed out') + context.streamComplete = true + endedOn = 'timeout' + reader.cancel().catch(() => {}) + }, timeout) try { await processSSEStream(reader, decoder, abortSignal, async (raw) => { if (eventsReceived === 0) { - firstEventMs = Math.round(performance.now() - bodyStart); + firstEventMs = Math.round(performance.now() - bodyStart) } - eventsReceived += 1; + eventsReceived += 1 try { - bytesReceived += JSON.stringify(raw ?? null).length; + bytesReceived += JSON.stringify(raw ?? null).length } catch { // non-serializable event; skip byte accounting } if (abortSignal?.aborted) { - context.wasAborted = true; - return true; + context.wasAborted = true + return true } - const parsedEvent = parsePersistedStreamEventEnvelope(raw); + const parsedEvent = parsePersistedStreamEventEnvelope(raw) if (!parsedEvent.ok) { const detail = [parsedEvent.message, ...(parsedEvent.errors ?? 
[])] .filter(Boolean) - .join("; "); - const failureMessage = `Received invalid stream event on shared path: ${detail}`; - context.errors.push(failureMessage); - logger.error("Received invalid stream event on shared path", { + .join('; ') + const failureMessage = `Received invalid stream event on shared path: ${detail}` + context.errors.push(failureMessage) + logger.error('Received invalid stream event on shared path', { reason: parsedEvent.reason, message: parsedEvent.message, errors: parsedEvent.errors, - }); - throw new FatalSseEventError(failureMessage); + }) + throw new FatalSseEventError(failureMessage) } - const envelope = parsedEvent.event; - const streamEvent = eventToStreamEvent(envelope); + const envelope = parsedEvent.event + const streamEvent = eventToStreamEvent(envelope) if (envelope.trace?.requestId) { - const goTraceId = envelope.trace.goTraceId || envelope.trace.requestId; - context.trace.setGoTraceId(goTraceId); - options.onGoTraceId?.(goTraceId); + const goTraceId = envelope.trace.goTraceId || envelope.trace.requestId + context.trace.setGoTraceId(goTraceId) + options.onGoTraceId?.(goTraceId) } - if ( - shouldSkipToolCallEvent(streamEvent) || - shouldSkipToolResultEvent(streamEvent) - ) { - return; + if (shouldSkipToolCallEvent(streamEvent) || shouldSkipToolResultEvent(streamEvent)) { + return } await processFilePreviewStreamEvent({ @@ -257,139 +230,127 @@ export async function runStreamLoop( execContext, options, state: filePreviewAdapterState, - }); + }) try { - await options.onEvent?.(streamEvent); + await options.onEvent?.(streamEvent) } catch (error) { - logger.warn("Failed to forward stream event", { + logger.warn('Failed to forward stream event', { type: streamEvent.type, error: error instanceof Error ? error.message : String(error), - }); + }) } // Yield a macrotask so Node.js flushes the HTTP response buffer to // the browser. 
Microtask yields (await Promise.resolve()) are not // enough — the I/O layer needs a full event loop tick to write. - await new Promise((resolve) => setImmediate(resolve)); + await new Promise((resolve) => setImmediate(resolve)) if (options.onBeforeDispatch?.(streamEvent, context)) { - return context.streamComplete || undefined; + return context.streamComplete || undefined } if (isSubagentSpanStreamEvent(streamEvent)) { - const spanData = parseSubagentSpanData(streamEvent.payload.data); - const toolCallId = - streamEvent.scope?.parentToolCallId || spanData?.toolCallId; - const subagentName = streamEvent.payload.agent; - const spanEvt = streamEvent.payload.event; - const isPendingPause = spanData?.pending === true; + const spanData = parseSubagentSpanData(streamEvent.payload.data) + const toolCallId = streamEvent.scope?.parentToolCallId || spanData?.toolCallId + const subagentName = streamEvent.payload.agent + const spanEvt = streamEvent.payload.event + const isPendingPause = spanData?.pending === true if (spanEvt === MothershipStreamV1SpanLifecycleEvent.start) { - const lastParent = - context.subAgentParentStack[context.subAgentParentStack.length - 1]; - const lastBlock = - context.contentBlocks[context.contentBlocks.length - 1]; + const lastParent = context.subAgentParentStack[context.subAgentParentStack.length - 1] + const lastBlock = context.contentBlocks[context.contentBlocks.length - 1] if (toolCallId) { if (lastParent !== toolCallId) { - context.subAgentParentStack.push(toolCallId); + context.subAgentParentStack.push(toolCallId) } - context.subAgentParentToolCallId = toolCallId; - context.subAgentContent[toolCallId] ??= ""; - context.subAgentToolCalls[toolCallId] ??= []; + context.subAgentParentToolCallId = toolCallId + context.subAgentContent[toolCallId] ??= '' + context.subAgentToolCalls[toolCallId] ??= [] } if ( subagentName && !( lastParent === toolCallId && - lastBlock?.type === "subagent" && + lastBlock?.type === 'subagent' && lastBlock.content === 
subagentName ) ) { context.contentBlocks.push({ - type: "subagent", + type: 'subagent', content: subagentName, timestamp: Date.now(), - }); + }) } - return; + return } if (spanEvt === MothershipStreamV1SpanLifecycleEvent.end) { if (isPendingPause) { - return; + return } if (context.subAgentParentStack.length > 0) { - context.subAgentParentStack.pop(); + context.subAgentParentStack.pop() } else { - logger.warn("subagent end without matching start"); + logger.warn('subagent end without matching start') } context.subAgentParentToolCallId = context.subAgentParentStack.length > 0 - ? context.subAgentParentStack[ - context.subAgentParentStack.length - 1 - ] - : undefined; - return; + ? context.subAgentParentStack[context.subAgentParentStack.length - 1] + : undefined + return } } if (handleSubagentRouting(streamEvent, context)) { - const handler = subAgentHandlers[streamEvent.type]; + const handler = subAgentHandlers[streamEvent.type] if (handler) { - await handler(streamEvent, context, execContext, options); + await handler(streamEvent, context, execContext, options) } - return context.streamComplete || undefined; + return context.streamComplete || undefined } - const handler = sseHandlers[streamEvent.type]; + const handler = sseHandlers[streamEvent.type] if (handler) { - await handler(streamEvent, context, execContext, options); + await handler(streamEvent, context, execContext, options) } - return context.streamComplete || undefined; - }); - - if ( - !context.streamComplete && - !abortSignal?.aborted && - !context.wasAborted - ) { - const streamPath = new URL(fetchUrl).pathname; - const message = `Copilot backend stream ended before a terminal event on ${streamPath}`; - context.errors.push(message); - logger.error("Copilot backend stream ended before a terminal event", { + return context.streamComplete || undefined + }) + + if (!context.streamComplete && !abortSignal?.aborted && !context.wasAborted) { + const streamPath = new URL(fetchUrl).pathname + const message = 
`Copilot backend stream ended before a terminal event on ${streamPath}` + context.errors.push(message) + logger.error('Copilot backend stream ended before a terminal event', { path: streamPath, requestId: context.requestId, messageId: context.messageId, - }); - endedOn = "closed_no_terminal"; - throw new CopilotBackendError(message, { status: 503 }); + }) + endedOn = 'closed_no_terminal' + throw new CopilotBackendError(message, { status: 503 }) } } catch (error) { - if ( - error instanceof FatalSseEventError && - !context.errors.includes(error.message) - ) { - context.errors.push(error.message); + if (error instanceof FatalSseEventError && !context.errors.includes(error.message)) { + context.errors.push(error.message) } - if (endedOn === "terminal") { + if (endedOn === 'terminal') { endedOn = error instanceof CopilotBackendError - ? "backend_error" + ? 'backend_error' : error instanceof BillingLimitError - ? "billing_limit" - : "error"; + ? 'billing_limit' + : 'error' } - throw error; + throw error } finally { if (abortSignal?.aborted) { - context.wasAborted = true; - await reader.cancel().catch(() => {}); - if (endedOn === "terminal") { - endedOn = "aborted"; + context.wasAborted = true + await reader.cancel().catch(() => {}) + if (endedOn === 'terminal') { + endedOn = 'aborted' } } - clearTimeout(timeoutId); + clearTimeout(timeoutId) - const bodyDurationMs = Math.round(performance.now() - bodyStart); + const bodyDurationMs = Math.round(performance.now() - bodyStart) bodySpan.attributes = { ...(bodySpan.attributes ?? {}), eventsReceived, @@ -397,26 +358,26 @@ export async function runStreamLoop( firstEventMs, endedOn, durationMs: bodyDurationMs, - }; + } context.trace.endSpan( bodySpan, - endedOn === "terminal" ? "ok" : endedOn === "aborted" ? "cancelled" : "error", - ); + endedOn === 'terminal' ? 'ok' : endedOn === 'aborted' ? 
'cancelled' : 'error' + ) } } function estimateBodyBytes(body: BodyInit | null | undefined): number { if (!body) { - return 0; + return 0 } - if (typeof body === "string") { - return body.length; + if (typeof body === 'string') { + return body.length } if (body instanceof ArrayBuffer) { - return body.byteLength; + return body.byteLength } if (ArrayBuffer.isView(body)) { - return body.byteLength; + return body.byteLength } - return 0; + return 0 } diff --git a/apps/sim/lib/copilot/request/handlers/span.ts b/apps/sim/lib/copilot/request/handlers/span.ts index 7c09d8fc5b1..978e6ec0780 100644 --- a/apps/sim/lib/copilot/request/handlers/span.ts +++ b/apps/sim/lib/copilot/request/handlers/span.ts @@ -31,18 +31,14 @@ export const handleSpanEvent: StreamHandler = (event, context) => { const scopeAgent = typeof payload.agent === 'string' && payload.agent ? payload.agent : 'subagent' if (evt === MothershipStreamV1SpanLifecycleEvent.start) { - const span = context.trace.startSpan( - `subagent:${scopeAgent}`, - 'go.subagent', - { - agent: scopeAgent, - parentToolCallId: event.scope?.parentToolCallId, - }, - ) + const span = context.trace.startSpan(`subagent:${scopeAgent}`, 'go.subagent', { + agent: scopeAgent, + parentToolCallId: event.scope?.parentToolCallId, + }) context.subAgentTraceSpans ??= new Map() - context.subAgentTraceSpans.set(scopeAgent + ':' + (event.scope?.parentToolCallId || ''), span) + context.subAgentTraceSpans.set(`${scopeAgent}:${event.scope?.parentToolCallId || ''}`, span) } else if (evt === MothershipStreamV1SpanLifecycleEvent.end) { - const key = scopeAgent + ':' + (event.scope?.parentToolCallId || '') + const key = `${scopeAgent}:${event.scope?.parentToolCallId || ''}` const span = context.subAgentTraceSpans?.get(key) if (span) { context.trace.endSpan(span, 'ok') @@ -56,14 +52,10 @@ export const handleSpanEvent: StreamHandler = (event, context) => { kind === MothershipStreamV1SpanPayloadKind.structured_result || kind === 
MothershipStreamV1SpanPayloadKind.subagent_result ) { - const span = context.trace.startSpan( - `${kind}:${payload.agent ?? 'main'}`, - `go.${kind}`, - { - agent: payload.agent, - hasData: payload.data !== undefined, - }, - ) + const span = context.trace.startSpan(`${kind}:${payload.agent ?? 'main'}`, `go.${kind}`, { + agent: payload.agent, + hasData: payload.data !== undefined, + }) context.trace.endSpan(span, 'ok') return } diff --git a/apps/sim/lib/copilot/request/lifecycle/finalize.ts b/apps/sim/lib/copilot/request/lifecycle/finalize.ts index f242c30831e..12cc789abc5 100644 --- a/apps/sim/lib/copilot/request/lifecycle/finalize.ts +++ b/apps/sim/lib/copilot/request/lifecycle/finalize.ts @@ -41,7 +41,10 @@ export async function finalizeStream( if (aborted) { await handleAborted(result, publisher, runId, requestId) } else if (!result.success) { - span.setStatus({ code: SpanStatusCode.ERROR, message: result.error || 'orchestration failed' }) + span.setStatus({ + code: SpanStatusCode.ERROR, + message: result.error || 'orchestration failed', + }) await handleError(result, publisher, runId, requestId) } else { await handleSuccess(publisher, runId, requestId) diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.test.ts b/apps/sim/lib/copilot/request/lifecycle/headless.test.ts index 49ada3d6574..7af0bfd58d8 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.test.ts +++ b/apps/sim/lib/copilot/request/lifecycle/headless.test.ts @@ -2,96 +2,94 @@ * @vitest-environment node */ -import { propagation, trace } from "@opentelemetry/api"; -import { W3CTraceContextPropagator } from "@opentelemetry/core"; -import { BasicTracerProvider } from "@opentelemetry/sdk-trace-base"; -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; -import type { OrchestratorResult } from "@/lib/copilot/request/types"; +import { propagation, trace } from '@opentelemetry/api' +import 
{ W3CTraceContextPropagator } from '@opentelemetry/core' +import { BasicTracerProvider } from '@opentelemetry/sdk-trace-base' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import type { OrchestratorResult } from '@/lib/copilot/request/types' const { runCopilotLifecycle } = vi.hoisted(() => ({ runCopilotLifecycle: vi.fn(), -})); +})) -vi.mock("@/lib/copilot/request/lifecycle/run", () => ({ +vi.mock('@/lib/copilot/request/lifecycle/run', () => ({ runCopilotLifecycle, -})); +})) -import { runHeadlessCopilotLifecycle } from "./headless"; +import { runHeadlessCopilotLifecycle } from './headless' -function createLifecycleResult( - overrides?: Partial, -): OrchestratorResult { +function createLifecycleResult(overrides?: Partial): OrchestratorResult { return { success: true, - content: "done", + content: 'done', contentBlocks: [], toolCalls: [], - chatId: "chat-1", + chatId: 'chat-1', ...overrides, - }; + } } -describe("runHeadlessCopilotLifecycle", () => { +describe('runHeadlessCopilotLifecycle', () => { beforeEach(() => { - trace.setGlobalTracerProvider(new BasicTracerProvider()); - propagation.setGlobalPropagator(new W3CTraceContextPropagator()); + trace.setGlobalTracerProvider(new BasicTracerProvider()) + propagation.setGlobalPropagator(new W3CTraceContextPropagator()) vi.stubGlobal( - "fetch", + 'fetch', vi.fn().mockResolvedValue( new Response(null, { status: 200, - }), - ), - ); - }); + }) + ) + ) + }) afterEach(() => { - vi.clearAllMocks(); - vi.unstubAllGlobals(); - }); + vi.clearAllMocks() + vi.unstubAllGlobals() + }) - it("reports a successful headless trace", async () => { + it('reports a successful headless trace', async () => { runCopilotLifecycle.mockResolvedValueOnce( createLifecycleResult({ usage: { prompt: 10, completion: 5 }, cost: { input: 1, output: 2, total: 3 }, - }), - ); + }) + ) const result = await runHeadlessCopilotLifecycle( { - 
message: "hello", - messageId: "req-1", + message: 'hello', + messageId: 'req-1', }, { - userId: "user-1", - chatId: "chat-1", - workflowId: "workflow-1", - goRoute: "/api/mothership/execute", + userId: 'user-1', + chatId: 'chat-1', + workflowId: 'workflow-1', + goRoute: '/api/mothership/execute', interactive: false, - }, - ); + } + ) - expect(result.success).toBe(true); + expect(result.success).toBe(true) expect(runCopilotLifecycle).toHaveBeenCalledWith( - expect.objectContaining({ messageId: "req-1" }), + expect.objectContaining({ messageId: 'req-1' }), expect.objectContaining({ - simRequestId: "req-1", + simRequestId: 'req-1', trace: expect.any(Object), - chatId: "chat-1", - }), - ); - - expect(fetch).toHaveBeenCalledTimes(1); - const [url, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; - expect(url).toContain("/api/traces"); - const body = JSON.parse(String(init.body)); + chatId: 'chat-1', + }) + ) + + expect(fetch).toHaveBeenCalledTimes(1) + const [url, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] + expect(url).toContain('/api/traces') + const body = JSON.parse(String(init.body)) expect(body).toEqual( expect.objectContaining({ - simRequestId: "req-1", + simRequestId: 'req-1', outcome: RequestTraceV1Outcome.success, - chatId: "chat-1", + chatId: 'chat-1', usage: { inputTokens: 10, outputTokens: 5, @@ -100,131 +98,123 @@ describe("runHeadlessCopilotLifecycle", () => { rawTotalCost: 3, billedTotalCost: 3, }, - }), - ); - }); + }) + ) + }) - it("reports an error trace when the lifecycle result is unsuccessful", async () => { + it('reports an error trace when the lifecycle result is unsuccessful', async () => { runCopilotLifecycle.mockResolvedValueOnce( createLifecycleResult({ success: false, - error: "failed", - }), - ); + error: 'failed', + }) + ) const result = await runHeadlessCopilotLifecycle( { - message: "hello", - messageId: "req-2", + message: 'hello', + messageId: 'req-2', }, { - userId: "user-1", - chatId: "chat-1", - 
workflowId: "workflow-1", - goRoute: "/api/mothership/execute", + userId: 'user-1', + chatId: 'chat-1', + workflowId: 'workflow-1', + goRoute: '/api/mothership/execute', interactive: false, - }, - ); + } + ) - expect(result.success).toBe(false); - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; - const body = JSON.parse(String(init.body)); - expect(body.outcome).toBe(RequestTraceV1Outcome.error); - }); + expect(result.success).toBe(false) + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] + const body = JSON.parse(String(init.body)) + expect(body.outcome).toBe(RequestTraceV1Outcome.error) + }) - it("prefers an explicit simRequestId over the payload messageId", async () => { - runCopilotLifecycle.mockResolvedValueOnce(createLifecycleResult()); + it('prefers an explicit simRequestId over the payload messageId', async () => { + runCopilotLifecycle.mockResolvedValueOnce(createLifecycleResult()) await runHeadlessCopilotLifecycle( { - message: "hello", - messageId: "message-req-id", + message: 'hello', + messageId: 'message-req-id', }, { - userId: "user-1", - chatId: "chat-1", - workflowId: "workflow-1", - simRequestId: "workflow-request-id", - goRoute: "/api/mothership/execute", + userId: 'user-1', + chatId: 'chat-1', + workflowId: 'workflow-1', + simRequestId: 'workflow-request-id', + goRoute: '/api/mothership/execute', interactive: false, - }, - ); + } + ) expect(runCopilotLifecycle).toHaveBeenCalledWith( - expect.objectContaining({ messageId: "message-req-id" }), + expect.objectContaining({ messageId: 'message-req-id' }), expect.objectContaining({ - simRequestId: "workflow-request-id", - }), - ); + simRequestId: 'workflow-request-id', + }) + ) - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; - const body = JSON.parse(String(init.body)); - expect(body.simRequestId).toBe("workflow-request-id"); - }); + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] + const body = 
JSON.parse(String(init.body)) + expect(body.simRequestId).toBe('workflow-request-id') + }) - it("passes an OTel context to the lifecycle and trace report", async () => { - let lifecycleTraceparent = ""; + it('passes an OTel context to the lifecycle and trace report', async () => { + let lifecycleTraceparent = '' runCopilotLifecycle.mockImplementationOnce(async (_payload, options) => { - const { traceHeaders } = - await import("@/lib/copilot/request/go/propagation"); - lifecycleTraceparent = - traceHeaders({}, options.otelContext).traceparent ?? ""; - return createLifecycleResult(); - }); + const { traceHeaders } = await import('@/lib/copilot/request/go/propagation') + lifecycleTraceparent = traceHeaders({}, options.otelContext).traceparent ?? '' + return createLifecycleResult() + }) await runHeadlessCopilotLifecycle( { - message: "hello", - messageId: "req-otel", + message: 'hello', + messageId: 'req-otel', }, { - userId: "user-1", - chatId: "chat-1", - workflowId: "workflow-1", - goRoute: "/api/mothership/execute", + userId: 'user-1', + chatId: 'chat-1', + workflowId: 'workflow-1', + goRoute: '/api/mothership/execute', interactive: false, - }, - ); + } + ) - expect(lifecycleTraceparent).toMatch( - /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, - ); - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; - const headers = init.headers as Record; + expect(lifecycleTraceparent).toMatch(/^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/) + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] + const headers = init.headers as Record // The outbound trace report now runs inside its own OTel child span, so // traceparent has the same trace-id as the lifecycle but a different // span-id. Both must stay on the same trace. 
- const lifecycleTraceId = lifecycleTraceparent.split("-")[1]; - expect(headers.traceparent).toMatch( - /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, - ); - expect(headers.traceparent.split("-")[1]).toBe(lifecycleTraceId); - expect(headers.traceparent.split("-")[2]).not.toBe( - lifecycleTraceparent.split("-")[2], - ); - }); - - it("reports an error trace when the lifecycle throws", async () => { - runCopilotLifecycle.mockRejectedValueOnce(new Error("kaboom")); + const lifecycleTraceId = lifecycleTraceparent.split('-')[1] + expect(headers.traceparent).toMatch(/^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/) + expect(headers.traceparent.split('-')[1]).toBe(lifecycleTraceId) + expect(headers.traceparent.split('-')[2]).not.toBe(lifecycleTraceparent.split('-')[2]) + }) + + it('reports an error trace when the lifecycle throws', async () => { + runCopilotLifecycle.mockRejectedValueOnce(new Error('kaboom')) await expect( runHeadlessCopilotLifecycle( { - message: "hello", - messageId: "req-3", + message: 'hello', + messageId: 'req-3', }, { - userId: "user-1", - chatId: "chat-1", - workflowId: "workflow-1", - goRoute: "/api/mothership/execute", + userId: 'user-1', + chatId: 'chat-1', + workflowId: 'workflow-1', + goRoute: '/api/mothership/execute', interactive: false, - }, - ), - ).rejects.toThrow("kaboom"); - - const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit]; - const body = JSON.parse(String(init.body)); - expect(body.outcome).toBe(RequestTraceV1Outcome.error); - }); -}); + } + ) + ).rejects.toThrow('kaboom') + + const [, init] = vi.mocked(fetch).mock.calls[0] as [string, RequestInit] + const body = JSON.parse(String(init.body)) + expect(body.outcome).toBe(RequestTraceV1Outcome.error) + }) +}) diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.ts b/apps/sim/lib/copilot/request/lifecycle/headless.ts index 0083ff51692..fd239396808 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.ts +++ 
b/apps/sim/lib/copilot/request/lifecycle/headless.ts @@ -1,43 +1,38 @@ -import { createLogger } from "@sim/logger"; -import type { RequestTraceV1Outcome as RequestTraceOutcome } from "@/lib/copilot/generated/request-trace-v1"; +import { createLogger } from '@sim/logger' +import type { RequestTraceV1Outcome as RequestTraceOutcome } from '@/lib/copilot/generated/request-trace-v1' import { RequestTraceV1Outcome, RequestTraceV1SpanStatus, -} from "@/lib/copilot/generated/request-trace-v1"; -import type { CopilotLifecycleOptions } from "@/lib/copilot/request/lifecycle/run"; -import { runCopilotLifecycle } from "@/lib/copilot/request/lifecycle/run"; -import { withCopilotOtelContext } from "@/lib/copilot/request/otel"; -import { reportTrace, TraceCollector } from "@/lib/copilot/request/trace"; -import type { OrchestratorResult } from "@/lib/copilot/request/types"; -import { generateId } from "@/lib/core/utils/uuid"; +} from '@/lib/copilot/generated/request-trace-v1' +import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' +import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' +import { withCopilotOtelContext } from '@/lib/copilot/request/otel' +import { reportTrace, TraceCollector } from '@/lib/copilot/request/trace' +import type { OrchestratorResult } from '@/lib/copilot/request/types' +import { generateId } from '@/lib/core/utils/uuid' -const logger = createLogger("CopilotHeadlessLifecycle"); +const logger = createLogger('CopilotHeadlessLifecycle') export async function runHeadlessCopilotLifecycle( requestPayload: Record, - options: CopilotLifecycleOptions, + options: CopilotLifecycleOptions ): Promise { const simRequestId = - typeof options.simRequestId === "string" && options.simRequestId.length > 0 + typeof options.simRequestId === 'string' && options.simRequestId.length > 0 ? 
options.simRequestId - : typeof requestPayload.messageId === "string" && - requestPayload.messageId.length > 0 + : typeof requestPayload.messageId === 'string' && requestPayload.messageId.length > 0 ? requestPayload.messageId - : generateId(); - const trace = new TraceCollector(); - const requestSpan = trace.startSpan( - "Headless Mothership Request", - "request", - { - route: options.goRoute, - workflowId: options.workflowId, - workspaceId: options.workspaceId, - chatId: options.chatId, - }, - ); + : generateId() + const trace = new TraceCollector() + const requestSpan = trace.startSpan('Headless Mothership Request', 'request', { + route: options.goRoute, + workflowId: options.workflowId, + workspaceId: options.workspaceId, + chatId: options.chatId, + }) - let result: OrchestratorResult | undefined; - let outcome: RequestTraceOutcome = RequestTraceV1Outcome.error; + let result: OrchestratorResult | undefined + let outcome: RequestTraceOutcome = RequestTraceV1Outcome.error return withCopilotOtelContext( { @@ -47,7 +42,7 @@ export async function runHeadlessCopilotLifecycle( workflowId: options.workflowId, executionId: options.executionId, runId: options.runId, - transport: "headless", + transport: 'headless', }, async (otelContext) => { try { @@ -56,18 +51,18 @@ export async function runHeadlessCopilotLifecycle( trace, simRequestId, otelContext, - }); + }) outcome = options.abortSignal?.aborted ? RequestTraceV1Outcome.cancelled : result.success ? RequestTraceV1Outcome.success - : RequestTraceV1Outcome.error; - return result; + : RequestTraceV1Outcome.error + return result } catch (error) { outcome = options.abortSignal?.aborted ? RequestTraceV1Outcome.cancelled - : RequestTraceV1Outcome.error; - throw error; + : RequestTraceV1Outcome.error + throw error } finally { trace.endSpan( requestSpan, @@ -75,8 +70,8 @@ export async function runHeadlessCopilotLifecycle( ? RequestTraceV1SpanStatus.ok : outcome === RequestTraceV1Outcome.cancelled ? 
RequestTraceV1SpanStatus.cancelled - : RequestTraceV1SpanStatus.error, - ); + : RequestTraceV1SpanStatus.error + ) try { await reportTrace( @@ -89,16 +84,16 @@ export async function runHeadlessCopilotLifecycle( usage: result?.usage, cost: result?.cost, }), - otelContext, - ); + otelContext + ) } catch (error) { - logger.warn("Failed to report headless trace", { + logger.warn('Failed to report headless trace', { simRequestId, chatId: result?.chatId ?? options.chatId, error: error instanceof Error ? error.message : String(error), - }); + }) } } - }, - ); + } + ) } diff --git a/apps/sim/lib/copilot/request/lifecycle/run.ts b/apps/sim/lib/copilot/request/lifecycle/run.ts index 260863bb6e3..e15414a03e1 100644 --- a/apps/sim/lib/copilot/request/lifecycle/run.ts +++ b/apps/sim/lib/copilot/request/lifecycle/run.ts @@ -1,66 +1,63 @@ -import type { Context } from "@opentelemetry/api"; -import { createLogger } from "@sim/logger"; -import { - createRunSegment, - updateRunStatus, -} from "@/lib/copilot/async-runs/repository"; -import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from "@/lib/copilot/constants"; +import type { Context } from '@opentelemetry/api' +import { createLogger } from '@sim/logger' +import { createRunSegment, updateRunStatus } from '@/lib/copilot/async-runs/repository' +import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { MothershipStreamV1EventType, MothershipStreamV1RunKind, MothershipStreamV1ToolOutcome, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import { createStreamingContext } from "@/lib/copilot/request/context/request-context"; -import { buildToolCallSummaries } from "@/lib/copilot/request/context/result"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import { createStreamingContext } from '@/lib/copilot/request/context/request-context' +import { buildToolCallSummaries } from '@/lib/copilot/request/context/result' import { BillingLimitError, CopilotBackendError, runStreamLoop, -} from 
"@/lib/copilot/request/go/stream"; +} from '@/lib/copilot/request/go/stream' import { getToolCallTerminalData, requireToolCallStateResult, setTerminalToolCallState, -} from "@/lib/copilot/request/tool-call-state"; -import { handleBillingLimitResponse } from "@/lib/copilot/request/tools/billing"; -import { executeToolAndReport } from "@/lib/copilot/request/tools/executor"; -import type { TraceCollector } from "@/lib/copilot/request/trace"; -import { RequestTraceV1SpanStatus } from "@/lib/copilot/request/trace"; +} from '@/lib/copilot/request/tool-call-state' +import { handleBillingLimitResponse } from '@/lib/copilot/request/tools/billing' +import { executeToolAndReport } from '@/lib/copilot/request/tools/executor' +import type { TraceCollector } from '@/lib/copilot/request/trace' +import { RequestTraceV1SpanStatus } from '@/lib/copilot/request/trace' import type { ExecutionContext, OrchestratorOptions, OrchestratorResult, StreamEvent, StreamingContext, -} from "@/lib/copilot/request/types"; -import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; -import { env } from "@/lib/core/config/env"; -import { generateId } from "@/lib/core/utils/uuid"; -import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; +} from '@/lib/copilot/request/types' +import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' +import { env } from '@/lib/core/config/env' +import { generateId } from '@/lib/core/utils/uuid' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -const logger = createLogger("CopilotLifecycle"); +const logger = createLogger('CopilotLifecycle') -const MAX_RESUME_ATTEMPTS = 3; -const RESUME_BACKOFF_MS = [250, 500, 1000] as const; +const MAX_RESUME_ATTEMPTS = 3 +const RESUME_BACKOFF_MS = [250, 500, 1000] as const export interface CopilotLifecycleOptions extends OrchestratorOptions { - userId: string; - workflowId?: string; - workspaceId?: string; - chatId?: string; - executionId?: string; - runId?: 
string; - goRoute?: string; - trace?: TraceCollector; - simRequestId?: string; - otelContext?: Context; - onGoTraceId?: (goTraceId: string) => void; - executionContext?: ExecutionContext; + userId: string + workflowId?: string + workspaceId?: string + chatId?: string + executionId?: string + runId?: string + goRoute?: string + trace?: TraceCollector + simRequestId?: string + otelContext?: Context + onGoTraceId?: (goTraceId: string) => void + executionContext?: ExecutionContext } export async function runCopilotLifecycle( requestPayload: Record, - options: CopilotLifecycleOptions, + options: CopilotLifecycleOptions ): Promise { const { userId, @@ -69,12 +66,10 @@ export async function runCopilotLifecycle( chatId, executionId, runId, - goRoute = "/api/copilot", - } = options; + goRoute = '/api/copilot', + } = options const payloadMsgId = - typeof requestPayload?.messageId === "string" - ? requestPayload.messageId - : generateId(); + typeof requestPayload?.messageId === 'string' ? requestPayload.messageId : generateId() const runIdentity = await ensureHeadlessRunIdentity({ requestPayload, userId, @@ -84,9 +79,9 @@ export async function runCopilotLifecycle( executionId, runId, messageId: payloadMsgId, - }); - const resolvedExecutionId = runIdentity.executionId ?? executionId; - const resolvedRunId = runIdentity.runId ?? runId; + }) + const resolvedExecutionId = runIdentity.executionId ?? executionId + const resolvedRunId = runIdentity.runId ?? runId const lifecycleOptions: CopilotLifecycleOptions = { ...options, executionId: resolvedExecutionId, @@ -102,7 +97,7 @@ export async function runCopilotLifecycle( }, } : {}), - }; + } const execContext = lifecycleOptions.executionContext ?? 
@@ -114,7 +109,7 @@ export async function runCopilotLifecycle( executionId: resolvedExecutionId, runId: resolvedRunId, abortSignal: lifecycleOptions.abortSignal, - })); + })) const context = createStreamingContext({ chatId, @@ -123,16 +118,10 @@ export async function runCopilotLifecycle( runId: resolvedRunId, messageId: payloadMsgId, ...(lifecycleOptions.trace ? { trace: lifecycleOptions.trace } : {}), - }); + }) try { - await runCheckpointLoop( - requestPayload, - context, - execContext, - lifecycleOptions, - goRoute, - ); + await runCheckpointLoop(requestPayload, context, execContext, lifecycleOptions, goRoute) const result: OrchestratorResult = { success: context.errors.length === 0 && !context.wasAborted, @@ -144,24 +133,21 @@ export async function runCopilotLifecycle( errors: context.errors.length ? context.errors : undefined, usage: context.usage, cost: context.cost, - }; - await lifecycleOptions.onComplete?.(result); - return result; + } + await lifecycleOptions.onComplete?.(result) + return result } catch (error) { - const err = - error instanceof Error - ? error - : new Error("Copilot orchestration failed"); - logger.error("Copilot orchestration failed", { error: err.message }); - await lifecycleOptions.onError?.(err); + const err = error instanceof Error ? 
error : new Error('Copilot orchestration failed') + logger.error('Copilot orchestration failed', { error: err.message }) + await lifecycleOptions.onError?.(err) return { success: false, - content: "", + content: '', contentBlocks: [], toolCalls: [], chatId: context.chatId, error: err.message, - }; + } } } @@ -174,21 +160,21 @@ async function runCheckpointLoop( context: StreamingContext, execContext: ExecutionContext, options: CopilotLifecycleOptions, - initialRoute: string, + initialRoute: string ): Promise { - let route = initialRoute; - let payload: Record = initialPayload; - let resumeAttempt = 0; - const callerOnEvent = options.onEvent; + let route = initialRoute + let payload: Record = initialPayload + let resumeAttempt = 0 + const callerOnEvent = options.onEvent for (;;) { - context.streamComplete = false; - const isResume = route === "/api/tools/resume"; + context.streamComplete = false + const isResume = route === '/api/tools/resume' if (isResume && isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } const loopOptions = { @@ -200,102 +186,90 @@ async function runCheckpointLoop( options.runId ) { try { - await updateRunStatus(options.runId, "paused_waiting_for_tool"); + await updateRunStatus(options.runId, 'paused_waiting_for_tool') } catch (error) { - logger.warn("Failed to mark run as paused_waiting_for_tool", { + logger.warn('Failed to mark run as paused_waiting_for_tool', { runId: options.runId, error: error instanceof Error ? error.message : String(error), - }); + }) } } - await callerOnEvent?.(event); + await callerOnEvent?.(event) }, - }; + } const streamSpan = context.trace.startSpan( - isResume ? "Sim → Go (Resume)" : "Sim → Go Stream", - isResume ? "lifecycle.resume" : "sim.stream", + isResume ? 'Sim → Go (Resume)' : 'Sim → Go Stream', + isResume ? 
'lifecycle.resume' : 'sim.stream', { route, isResume, ...(isResume ? { attempt: resumeAttempt } : {}), - }, - ); - context.trace.setActiveSpan(streamSpan); + } + ) + context.trace.setActiveSpan(streamSpan) - logger.info("Starting stream loop", { + logger.info('Starting stream loop', { route, isResume, resumeAttempt, pendingToolPromises: context.pendingToolPromises.size, toolCallCount: context.toolCalls.size, hasCheckpoint: !!context.awaitingAsyncContinuation, - }); + }) try { await runStreamLoop( `${SIM_AGENT_API_URL}${route}`, { - method: "POST", + method: 'POST', headers: { - "Content-Type": "application/json", - ...(env.COPILOT_API_KEY - ? { "x-api-key": env.COPILOT_API_KEY } - : {}), - "X-Client-Version": SIM_AGENT_VERSION, - ...(options.simRequestId - ? { "X-Sim-Request-ID": options.simRequestId } - : {}), + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + 'X-Client-Version': SIM_AGENT_VERSION, + ...(options.simRequestId ? { 'X-Sim-Request-ID': options.simRequestId } : {}), }, body: JSON.stringify(payload), }, context, execContext, - loopOptions, - ); + loopOptions + ) const streamStatus = isAborted(options, context) ? RequestTraceV1SpanStatus.cancelled : context.errors.length > 0 ? 
RequestTraceV1SpanStatus.error - : RequestTraceV1SpanStatus.ok; - context.trace.endSpan(streamSpan, streamStatus); - context.trace.setActiveSpan(undefined); - resumeAttempt = 0; + : RequestTraceV1SpanStatus.ok + context.trace.endSpan(streamSpan, streamStatus) + context.trace.setActiveSpan(undefined) + resumeAttempt = 0 } catch (streamError) { - context.trace.endSpan(streamSpan, RequestTraceV1SpanStatus.error); - context.trace.setActiveSpan(undefined); + context.trace.endSpan(streamSpan, RequestTraceV1SpanStatus.error) + context.trace.setActiveSpan(undefined) if (streamError instanceof BillingLimitError) { - await handleBillingLimitResponse( - streamError.userId, - context, - execContext, - options, - ); - break; + await handleBillingLimitResponse(streamError.userId, context, execContext, options) + break } if ( isResume && isRetryableStreamError(streamError) && resumeAttempt < MAX_RESUME_ATTEMPTS - 1 ) { - resumeAttempt++; - const backoff = RESUME_BACKOFF_MS[resumeAttempt - 1] ?? 1000; - logger.warn("Resume stream failed, retrying", { + resumeAttempt++ + const backoff = RESUME_BACKOFF_MS[resumeAttempt - 1] ?? 1000 + logger.warn('Resume stream failed, retrying', { attempt: resumeAttempt + 1, maxAttempts: MAX_RESUME_ATTEMPTS, backoffMs: backoff, - error: - streamError instanceof Error - ? streamError.message - : String(streamError), - }); - await sleepWithAbort(backoff, options.abortSignal); - continue; + error: streamError instanceof Error ? 
streamError.message : String(streamError), + }) + await sleepWithAbort(backoff, options.abortSignal) + continue } - throw streamError; + throw streamError } - logger.info("Stream loop completed", { + logger.info('Stream loop completed', { route, isResume, isAborted: isAborted(options, context), @@ -304,142 +278,131 @@ async function runCheckpointLoop( pendingToolPromises: context.pendingToolPromises.size, streamComplete: context.streamComplete, toolCallCount: context.toolCalls.size, - }); + }) if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } - const continuation = context.awaitingAsyncContinuation; - if (!continuation) break; + const continuation = context.awaitingAsyncContinuation + if (!continuation) break if (context.pendingToolPromises.size > 0) { - const waitSpan = context.trace.startSpan( - "Wait for Tools", - "lifecycle.wait_tools", - { - checkpointId: continuation.checkpointId, - pendingCount: context.pendingToolPromises.size, - }, - ); - logger.info("Waiting for in-flight tool executions before resume", { + const waitSpan = context.trace.startSpan('Wait for Tools', 'lifecycle.wait_tools', { + checkpointId: continuation.checkpointId, + pendingCount: context.pendingToolPromises.size, + }) + logger.info('Waiting for in-flight tool executions before resume', { checkpointId: continuation.checkpointId, pendingCount: context.pendingToolPromises.size, - }); - await Promise.allSettled(context.pendingToolPromises.values()); - context.trace.endSpan(waitSpan); + }) + await Promise.allSettled(context.pendingToolPromises.values()) + context.trace.endSpan(waitSpan) } if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } - const undispatchedToolIds = 
continuation.pendingToolCallIds.filter( - (toolCallId) => { - const tool = context.toolCalls.get(toolCallId); - return ( - !!tool && - !tool.result && - !tool.error && - !context.pendingToolPromises.has(toolCallId) && - tool.status !== "executing" - ); - }, - ); + const undispatchedToolIds = continuation.pendingToolCallIds.filter((toolCallId) => { + const tool = context.toolCalls.get(toolCallId) + return ( + !!tool && + !tool.result && + !tool.error && + !context.pendingToolPromises.has(toolCallId) && + tool.status !== 'executing' + ) + }) if (undispatchedToolIds.length > 0) { - logger.warn( - "Checkpointed tools were never dispatched; executing before resume", - { - checkpointId: continuation.checkpointId, - toolCallIds: undispatchedToolIds, - }, - ); + logger.warn('Checkpointed tools were never dispatched; executing before resume', { + checkpointId: continuation.checkpointId, + toolCallIds: undispatchedToolIds, + }) await Promise.allSettled( undispatchedToolIds.map((toolCallId) => - executeToolAndReport(toolCallId, context, execContext, options), - ), - ); + executeToolAndReport(toolCallId, context, execContext, options) + ) + ) } if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } const results: Array<{ - callId: string; - name: string; - data: unknown; - success: boolean; - }> = []; + callId: string + name: string + data: unknown + success: boolean + }> = [] for (const toolCallId of continuation.pendingToolCallIds) { if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } - const tool = context.toolCalls.get(toolCallId); + const tool = context.toolCalls.get(toolCallId) if (!tool || !tool.result) { - logger.error("Missing tool result for pending tool 
call", { + logger.error('Missing tool result for pending tool call', { toolCallId, checkpointId: continuation.checkpointId, hasToolEntry: !!tool, toolName: tool?.name, toolStatus: tool?.status, hasPendingPromise: context.pendingToolPromises.has(toolCallId), - }); - throw new Error( - `Cannot resume: missing result for pending tool call ${toolCallId}`, - ); + }) + throw new Error(`Cannot resume: missing result for pending tool call ${toolCallId}`) } results.push({ callId: toolCallId, - name: tool.name || "", + name: tool.name || '', data: getToolCallTerminalData(tool), success: requireToolCallStateResult(tool).success, - }); + }) } if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } - logger.info("Resuming with tool results", { + logger.info('Resuming with tool results', { checkpointId: continuation.checkpointId, runId: continuation.runId, toolCount: results.length, pendingToolCallIds: continuation.pendingToolCallIds, frameCount: continuation.frames?.length ?? 
0, - }); + }) - context.awaitingAsyncContinuation = undefined; - route = "/api/tools/resume"; + context.awaitingAsyncContinuation = undefined + route = '/api/tools/resume' payload = { streamId: context.messageId, checkpointId: continuation.checkpointId, results, - }; + } if (isAborted(options, context)) { - cancelPendingTools(context); - context.awaitingAsyncContinuation = undefined; - break; + cancelPendingTools(context) + context.awaitingAsyncContinuation = undefined + break } - logger.info("Prepared resume request payload", { + logger.info('Prepared resume request payload', { route, streamId: context.messageId, checkpointId: continuation.checkpointId, resultCount: results.length, - }); + }) } } @@ -450,80 +413,64 @@ async function runCheckpointLoop( async function buildExecutionContext( requestPayload: Record, params: { - userId: string; - workflowId?: string; - workspaceId?: string; - chatId?: string; - executionId?: string; - runId?: string; - abortSignal?: AbortSignal; - }, + userId: string + workflowId?: string + workspaceId?: string + chatId?: string + executionId?: string + runId?: string + abortSignal?: AbortSignal + } ): Promise { - const { - userId, - workflowId, - workspaceId, - chatId, - executionId, - runId, - abortSignal, - } = params; + const { userId, workflowId, workspaceId, chatId, executionId, runId, abortSignal } = params const userTimezone = - typeof requestPayload?.userTimezone === "string" - ? requestPayload.userTimezone - : undefined; - const requestMode = - typeof requestPayload?.mode === "string" ? requestPayload.mode : undefined; + typeof requestPayload?.userTimezone === 'string' ? requestPayload.userTimezone : undefined + const requestMode = typeof requestPayload?.mode === 'string' ? 
requestPayload.mode : undefined - let execContext: ExecutionContext; + let execContext: ExecutionContext if (workflowId) { - execContext = await prepareExecutionContext(userId, workflowId, chatId); + execContext = await prepareExecutionContext(userId, workflowId, chatId) } else { - const decryptedEnvVars = await getEffectiveDecryptedEnv( - userId, - workspaceId, - ); + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) execContext = { userId, - workflowId: "", + workflowId: '', workspaceId, chatId, decryptedEnvVars, - }; + } } - if (userTimezone) execContext.userTimezone = userTimezone; - execContext.copilotToolExecution = true; - if (requestMode) execContext.requestMode = requestMode; + if (userTimezone) execContext.userTimezone = userTimezone + execContext.copilotToolExecution = true + if (requestMode) execContext.requestMode = requestMode execContext.messageId = - typeof requestPayload?.messageId === "string" - ? requestPayload.messageId - : undefined; - execContext.executionId = executionId; - execContext.runId = runId; - execContext.abortSignal = abortSignal; - return execContext; + typeof requestPayload?.messageId === 'string' ? 
requestPayload.messageId : undefined + execContext.executionId = executionId + execContext.runId = runId + execContext.abortSignal = abortSignal + return execContext } async function ensureHeadlessRunIdentity(input: { - requestPayload: Record; - userId: string; - workflowId?: string; - workspaceId?: string; - chatId?: string; - executionId?: string; - runId?: string; - messageId: string; + requestPayload: Record + userId: string + workflowId?: string + workspaceId?: string + chatId?: string + executionId?: string + runId?: string + messageId: string }): Promise<{ executionId?: string; runId?: string }> { if (!input.chatId || input.executionId || input.runId) { return { executionId: input.executionId, runId: input.runId, - }; + } } - const executionId = generateId(); - const runId = generateId(); + const executionId = generateId() + const runId = generateId() try { await createRunSegment({ @@ -534,26 +481,21 @@ async function ensureHeadlessRunIdentity(input: { workflowId: input.workflowId, workspaceId: input.workspaceId, streamId: input.messageId, - model: - typeof input.requestPayload?.model === "string" - ? input.requestPayload.model - : null, + model: typeof input.requestPayload?.model === 'string' ? input.requestPayload.model : null, provider: - typeof input.requestPayload?.provider === "string" - ? input.requestPayload.provider - : null, + typeof input.requestPayload?.provider === 'string' ? input.requestPayload.provider : null, requestContext: { - source: "headless_lifecycle", + source: 'headless_lifecycle', }, - }); - return { executionId, runId }; + }) + return { executionId, runId } } catch (error) { - logger.warn("Failed to create headless run identity", { + logger.warn('Failed to create headless run identity', { chatId: input.chatId, messageId: input.messageId, error: error instanceof Error ? 
error.message : String(error), - }); - return {}; + }) + return {} } } @@ -561,54 +503,51 @@ async function ensureHeadlessRunIdentity(input: { // Helpers // --------------------------------------------------------------------------- -function isAborted( - options: CopilotLifecycleOptions, - context: StreamingContext, -): boolean { - return !!(options.abortSignal?.aborted || context.wasAborted); +function isAborted(options: CopilotLifecycleOptions, context: StreamingContext): boolean { + return !!(options.abortSignal?.aborted || context.wasAborted) } function cancelPendingTools(context: StreamingContext): void { for (const [, toolCall] of context.toolCalls) { - if (toolCall.status === "pending" || toolCall.status === "executing") { + if (toolCall.status === 'pending' || toolCall.status === 'executing') { setTerminalToolCallState(toolCall, { status: MothershipStreamV1ToolOutcome.cancelled, - error: "Stopped by user", - }); + error: 'Stopped by user', + }) } } } function isRetryableStreamError(error: unknown): boolean { - if (error instanceof DOMException && error.name === "AbortError") { - return false; + if (error instanceof DOMException && error.name === 'AbortError') { + return false } if (error instanceof CopilotBackendError) { - return error.status !== undefined && error.status >= 500; + return error.status !== undefined && error.status >= 500 } if (error instanceof TypeError) { - return true; + return true } - return false; + return false } function sleepWithAbort(ms: number, abortSignal?: AbortSignal): Promise { if (!abortSignal) { - return new Promise((resolve) => setTimeout(resolve, ms)); + return new Promise((resolve) => setTimeout(resolve, ms)) } if (abortSignal.aborted) { - return Promise.resolve(); + return Promise.resolve() } return new Promise((resolve) => { const timeoutId = setTimeout(() => { - abortSignal.removeEventListener("abort", onAbort); - resolve(); - }, ms); + abortSignal.removeEventListener('abort', onAbort) + resolve() + }, ms) const 
onAbort = () => { - clearTimeout(timeoutId); - abortSignal.removeEventListener("abort", onAbort); - resolve(); - }; - abortSignal.addEventListener("abort", onAbort, { once: true }); - }); + clearTimeout(timeoutId) + abortSignal.removeEventListener('abort', onAbort) + resolve() + } + abortSignal.addEventListener('abort', onAbort, { once: true }) + }) } diff --git a/apps/sim/lib/copilot/request/lifecycle/start.test.ts b/apps/sim/lib/copilot/request/lifecycle/start.test.ts index a3a7f19d033..5477fc9994b 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.test.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.test.ts @@ -2,11 +2,11 @@ * @vitest-environment node */ -import { propagation, trace } from "@opentelemetry/api"; -import { W3CTraceContextPropagator } from "@opentelemetry/core"; -import { BasicTracerProvider } from "@opentelemetry/sdk-trace-base"; -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { MothershipStreamV1EventType } from "@/lib/copilot/generated/mothership-stream-v1"; +import { propagation, trace } from '@opentelemetry/api' +import { W3CTraceContextPropagator } from '@opentelemetry/core' +import { BasicTracerProvider } from '@opentelemetry/sdk-trace-base' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { MothershipStreamV1EventType } from '@/lib/copilot/generated/mothership-stream-v1' const { runCopilotLifecycle, @@ -34,20 +34,20 @@ const { cleanupAbortMarker: vi.fn(), hasAbortMarker: vi.fn(), releasePendingChatStream: vi.fn(), -})); +})) -vi.mock("@/lib/copilot/request/lifecycle/run", () => ({ +vi.mock('@/lib/copilot/request/lifecycle/run', () => ({ runCopilotLifecycle, -})); +})) -vi.mock("@/lib/copilot/async-runs/repository", () => ({ +vi.mock('@/lib/copilot/async-runs/repository', () => ({ createRunSegment, updateRunStatus, -})); +})) -let mockPublisherController: ReadableStreamDefaultController | null = null; +let mockPublisherController: ReadableStreamDefaultController 
| null = null -vi.mock("@/lib/copilot/request/session", () => ({ +vi.mock('@/lib/copilot/request/session', () => ({ resetBuffer, clearFilePreviewSessions, scheduleBufferCleanup, @@ -61,43 +61,37 @@ vi.mock("@/lib/copilot/request/session", () => ({ unregisterActiveStream: vi.fn(), startAbortPoller: vi.fn().mockReturnValue(setInterval(() => {}, 999999)), SSE_RESPONSE_HEADERS: {}, - StreamWriter: vi.fn().mockImplementation(function () { - return { - attach: vi - .fn() - .mockImplementation((ctrl: ReadableStreamDefaultController) => { - mockPublisherController = ctrl; - }), - startKeepalive: vi.fn(), - stopKeepalive: vi.fn(), - flush: vi.fn(), - close: vi.fn().mockImplementation(() => { - try { - mockPublisherController?.close(); - } catch { - // already closed - } - }), - markDisconnected: vi.fn(), - publish: vi - .fn() - .mockImplementation(async (event: Record) => { - appendEvent(event); - }), - get clientDisconnected() { - return false; - }, - get sawComplete() { - return false; - }, - }; - }), -})); -vi.mock("@/lib/copilot/request/session/sse", () => ({ + StreamWriter: vi.fn().mockImplementation(() => ({ + attach: vi.fn().mockImplementation((ctrl: ReadableStreamDefaultController) => { + mockPublisherController = ctrl + }), + startKeepalive: vi.fn(), + stopKeepalive: vi.fn(), + flush: vi.fn(), + close: vi.fn().mockImplementation(() => { + try { + mockPublisherController?.close() + } catch { + // already closed + } + }), + markDisconnected: vi.fn(), + publish: vi.fn().mockImplementation(async (event: Record) => { + appendEvent(event) + }), + get clientDisconnected() { + return false + }, + get sawComplete() { + return false + }, + })), +})) +vi.mock('@/lib/copilot/request/session/sse', () => ({ SSE_RESPONSE_HEADERS: {}, -})); +})) -vi.mock("@sim/db", () => ({ +vi.mock('@sim/db', () => ({ db: { update: vi.fn(() => ({ set: vi.fn(() => ({ @@ -105,154 +99,150 @@ vi.mock("@sim/db", () => ({ })), })), }, -})); +})) -vi.mock("@/lib/copilot/tasks", () => ({ 
+vi.mock('@/lib/copilot/tasks', () => ({ taskPubSub: null, -})); +})) -import { createSSEStream } from "./start"; +import { createSSEStream } from './start' async function drainStream(stream: ReadableStream) { - const reader = stream.getReader(); + const reader = stream.getReader() while (true) { - const { done } = await reader.read(); - if (done) break; + const { done } = await reader.read() + if (done) break } } -describe("createSSEStream terminal error handling", () => { +describe('createSSEStream terminal error handling', () => { beforeEach(() => { - vi.clearAllMocks(); - trace.setGlobalTracerProvider(new BasicTracerProvider()); - propagation.setGlobalPropagator(new W3CTraceContextPropagator()); + vi.clearAllMocks() + trace.setGlobalTracerProvider(new BasicTracerProvider()) + propagation.setGlobalPropagator(new W3CTraceContextPropagator()) vi.stubGlobal( - "fetch", + 'fetch', vi.fn().mockResolvedValue( - new Response(JSON.stringify({ title: "Test title" }), { + new Response(JSON.stringify({ title: 'Test title' }), { status: 200, headers: { - "Content-Type": "application/json", + 'Content-Type': 'application/json', }, - }), - ), - ); - resetBuffer.mockResolvedValue(undefined); - clearFilePreviewSessions.mockResolvedValue(undefined); - scheduleBufferCleanup.mockResolvedValue(undefined); - scheduleFilePreviewSessionCleanup.mockResolvedValue(undefined); + }) + ) + ) + resetBuffer.mockResolvedValue(undefined) + clearFilePreviewSessions.mockResolvedValue(undefined) + scheduleBufferCleanup.mockResolvedValue(undefined) + scheduleFilePreviewSessionCleanup.mockResolvedValue(undefined) allocateCursor - .mockResolvedValueOnce({ seq: 1, cursor: "1" }) - .mockResolvedValueOnce({ seq: 2, cursor: "2" }) - .mockResolvedValueOnce({ seq: 3, cursor: "3" }); - appendEvent.mockImplementation(async (event: unknown) => event); - cleanupAbortMarker.mockResolvedValue(undefined); - hasAbortMarker.mockResolvedValue(false); - releasePendingChatStream.mockResolvedValue(undefined); - 
createRunSegment.mockResolvedValue(null); - updateRunStatus.mockResolvedValue(null); - }); + .mockResolvedValueOnce({ seq: 1, cursor: '1' }) + .mockResolvedValueOnce({ seq: 2, cursor: '2' }) + .mockResolvedValueOnce({ seq: 3, cursor: '3' }) + appendEvent.mockImplementation(async (event: unknown) => event) + cleanupAbortMarker.mockResolvedValue(undefined) + hasAbortMarker.mockResolvedValue(false) + releasePendingChatStream.mockResolvedValue(undefined) + createRunSegment.mockResolvedValue(null) + updateRunStatus.mockResolvedValue(null) + }) afterEach(() => { - vi.unstubAllGlobals(); - }); + vi.unstubAllGlobals() + }) - it("writes a terminal error event before close when orchestration returns success=false", async () => { + it('writes a terminal error event before close when orchestration returns success=false', async () => { runCopilotLifecycle.mockResolvedValue({ success: false, - error: "resume failed", - content: "", + error: 'resume failed', + content: '', contentBlocks: [], toolCalls: [], - }); + }) const stream = createSSEStream({ - requestPayload: { message: "hello" }, - userId: "user-1", - streamId: "stream-1", - executionId: "exec-1", - runId: "run-1", + requestPayload: { message: 'hello' }, + userId: 'user-1', + streamId: 'stream-1', + executionId: 'exec-1', + runId: 'run-1', currentChat: null, isNewChat: false, - message: "hello", - titleModel: "gpt-5.4", - requestId: "req-1", + message: 'hello', + titleModel: 'gpt-5.4', + requestId: 'req-1', orchestrateOptions: {}, - }); + }) - await drainStream(stream); + await drainStream(stream) expect(appendEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.error, - }), - ); - expect(scheduleBufferCleanup).toHaveBeenCalledWith("stream-1"); - }); + }) + ) + expect(scheduleBufferCleanup).toHaveBeenCalledWith('stream-1') + }) - it("writes the thrown terminal error event before close for replay durability", async () => { - runCopilotLifecycle.mockRejectedValue(new Error("kaboom")); + 
it('writes the thrown terminal error event before close for replay durability', async () => { + runCopilotLifecycle.mockRejectedValue(new Error('kaboom')) const stream = createSSEStream({ - requestPayload: { message: "hello" }, - userId: "user-1", - streamId: "stream-1", - executionId: "exec-1", - runId: "run-1", + requestPayload: { message: 'hello' }, + userId: 'user-1', + streamId: 'stream-1', + executionId: 'exec-1', + runId: 'run-1', currentChat: null, isNewChat: false, - message: "hello", - titleModel: "gpt-5.4", - requestId: "req-1", + message: 'hello', + titleModel: 'gpt-5.4', + requestId: 'req-1', orchestrateOptions: {}, - }); + }) - await drainStream(stream); + await drainStream(stream) expect(appendEvent).toHaveBeenCalledWith( expect.objectContaining({ type: MothershipStreamV1EventType.error, - }), - ); - expect(scheduleBufferCleanup).toHaveBeenCalledWith("stream-1"); - }); + }) + ) + expect(scheduleBufferCleanup).toHaveBeenCalledWith('stream-1') + }) - it("passes an OTel context into the streaming lifecycle", async () => { - let lifecycleTraceparent = ""; + it('passes an OTel context into the streaming lifecycle', async () => { + let lifecycleTraceparent = '' runCopilotLifecycle.mockImplementation(async (_payload, options) => { - const { traceHeaders } = - await import("@/lib/copilot/request/go/propagation"); - lifecycleTraceparent = - traceHeaders({}, options.otelContext).traceparent ?? ""; + const { traceHeaders } = await import('@/lib/copilot/request/go/propagation') + lifecycleTraceparent = traceHeaders({}, options.otelContext).traceparent ?? 
'' return { success: true, - content: "OK", + content: 'OK', contentBlocks: [], toolCalls: [], - }; - }); + } + }) const stream = createSSEStream({ - requestPayload: { message: "hello" }, - userId: "user-1", - streamId: "stream-1", - executionId: "exec-1", - runId: "run-1", + requestPayload: { message: 'hello' }, + userId: 'user-1', + streamId: 'stream-1', + executionId: 'exec-1', + runId: 'run-1', currentChat: null, isNewChat: false, - message: "hello", - titleModel: "gpt-5.4", - requestId: "req-otel", + message: 'hello', + titleModel: 'gpt-5.4', + requestId: 'req-otel', orchestrateOptions: { - goRoute: "/api/mothership", - workflowId: "workflow-1", + goRoute: '/api/mothership', + workflowId: 'workflow-1', }, - }); + }) - await drainStream(stream); + await drainStream(stream) - expect(lifecycleTraceparent).toMatch( - /^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/, - ); - }); -}); + expect(lifecycleTraceparent).toMatch(/^00-[0-9a-f]{32}-[0-9a-f]{16}-0[0-9a-f]$/) + }) +}) diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index b76da327f76..7541fd231ce 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -1,18 +1,19 @@ -import { context as otelContextApi, type Context } from "@opentelemetry/api"; -import { db } from "@sim/db"; -import { copilotChats } from "@sim/db/schema"; -import { createLogger } from "@sim/logger"; -import { eq } from "drizzle-orm"; -import { createRunSegment } from "@/lib/copilot/async-runs/repository"; -import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; +import { type Context, context as otelContextApi } from '@opentelemetry/api' +import { db } from '@sim/db' +import { copilotChats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq } from 'drizzle-orm' +import { createRunSegment } from '@/lib/copilot/async-runs/repository' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' 
import { MothershipStreamV1EventType, MothershipStreamV1SessionKind, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; -import { finalizeStream } from "@/lib/copilot/request/lifecycle/finalize"; -import type { CopilotLifecycleOptions } from "@/lib/copilot/request/lifecycle/run"; -import { runCopilotLifecycle } from "@/lib/copilot/request/lifecycle/run"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { finalizeStream } from '@/lib/copilot/request/lifecycle/finalize' +import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' +import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' +import { type CopilotLifecycleOutcome, startCopilotOtelRoot } from '@/lib/copilot/request/otel' import { cleanupAbortMarker, clearFilePreviewSessions, @@ -24,39 +25,35 @@ import { scheduleFilePreviewSessionCleanup, startAbortPoller, unregisterActiveStream, -} from "@/lib/copilot/request/session"; -import { SSE_RESPONSE_HEADERS } from "@/lib/copilot/request/session/sse"; -import { - type CopilotLifecycleOutcome, - startCopilotOtelRoot, -} from "@/lib/copilot/request/otel"; -import { reportTrace, TraceCollector } from "@/lib/copilot/request/trace"; -import { taskPubSub } from "@/lib/copilot/tasks"; -import { env } from "@/lib/core/config/env"; +} from '@/lib/copilot/request/session' +import { SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/session/sse' +import { reportTrace, TraceCollector } from '@/lib/copilot/request/trace' +import { taskPubSub } from '@/lib/copilot/tasks' +import { env } from '@/lib/core/config/env' -export { SSE_RESPONSE_HEADERS }; +export { SSE_RESPONSE_HEADERS } -const logger = createLogger("CopilotChatStreaming"); +const logger = createLogger('CopilotChatStreaming') type CurrentChatSummary = { - title?: string | null; -} | null; + title?: 
string | null +} | null export interface StreamingOrchestrationParams { - requestPayload: Record; - userId: string; - streamId: string; - executionId: string; - runId: string; - chatId?: string; - currentChat: CurrentChatSummary; - isNewChat: boolean; - message: string; - titleModel: string; - titleProvider?: string; - requestId: string; - workspaceId?: string; - orchestrateOptions: Omit; + requestPayload: Record + userId: string + streamId: string + executionId: string + runId: string + chatId?: string + currentChat: CurrentChatSummary + isNewChat: boolean + message: string + titleModel: string + titleProvider?: string + requestId: string + workspaceId?: string + orchestrateOptions: Omit /** * Pre-started gen_ai.agent.execute root returned by * `startCopilotOtelRoot`. When provided, this stream binds every nested @@ -64,12 +61,10 @@ export interface StreamingOrchestrationParams { * this function starts its own root internally (kept for back-compat * with the headless path). */ - otelRoot?: ReturnType; + otelRoot?: ReturnType } -export function createSSEStream( - params: StreamingOrchestrationParams, -): ReadableStream { +export function createSSEStream(params: StreamingOrchestrationParams): ReadableStream { const { requestPayload, userId, @@ -86,7 +81,7 @@ export function createSSEStream( workspaceId, orchestrateOptions, otelRoot, - } = params; + } = params // If the caller (POST handler) already started the gen_ai.agent.execute // root so that pre-stream setup work (persistUserMessage, resource @@ -103,51 +98,43 @@ export function createSSEStream( executionId, runId, streamId, - transport: "stream", - }); + transport: 'stream', + }) - const abortController = new AbortController(); - registerActiveStream(streamId, abortController); + const abortController = new AbortController() + registerActiveStream(streamId, abortController) - const publisher = new StreamWriter({ streamId, chatId, requestId }); + const publisher = new StreamWriter({ streamId, chatId, requestId 
}) - const collector = new TraceCollector(); + const collector = new TraceCollector() return new ReadableStream({ async start(controller) { - publisher.attach(controller); + publisher.attach(controller) // Re-enter the root OTel context. Node's AsyncLocalStorage does // not survive the Next.js handler -> ReadableStream.start boundary, // so nested `withCopilotSpan` / `withDbSpan` calls would otherwise // orphan into new traces. await otelContextApi.with(activeOtelRoot.context, async () => { - const otelContext = activeOtelRoot.context; - let rootOutcome: CopilotLifecycleOutcome = - RequestTraceV1Outcome.error; - let rootError: unknown = undefined; + const otelContext = activeOtelRoot.context + let rootOutcome: CopilotLifecycleOutcome = RequestTraceV1Outcome.error + let rootError: unknown try { - const requestSpan = collector.startSpan( - "Mothership Request", - "request", - { - streamId, - chatId, - runId, - }, - ); - let outcome: CopilotLifecycleOutcome = RequestTraceV1Outcome.error; + const requestSpan = collector.startSpan('Mothership Request', 'request', { + streamId, + chatId, + runId, + }) + let outcome: CopilotLifecycleOutcome = RequestTraceV1Outcome.error let lifecycleResult: | { - usage?: { prompt: number; completion: number }; - cost?: { input: number; output: number; total: number }; + usage?: { prompt: number; completion: number } + cost?: { input: number; output: number; total: number } } - | undefined; + | undefined - await Promise.all([ - resetBuffer(streamId), - clearFilePreviewSessions(streamId), - ]); + await Promise.all([resetBuffer(streamId), clearFilePreviewSessions(streamId)]) if (chatId) { createRunSegment({ @@ -155,27 +142,23 @@ export function createSSEStream( executionId, chatId, userId, - workflowId: - (requestPayload.workflowId as string | undefined) || null, + workflowId: (requestPayload.workflowId as string | undefined) || null, workspaceId, streamId, model: (requestPayload.model as string | undefined) || null, provider: 
(requestPayload.provider as string | undefined) || null, requestContext: { requestId }, }).catch((error) => { - logger.warn( - `[${requestId}] Failed to create copilot run segment`, - { - error: error instanceof Error ? error.message : String(error), - }, - ); - }); + logger.warn(`[${requestId}] Failed to create copilot run segment`, { + error: error instanceof Error ? error.message : String(error), + }) + }) } const abortPoller = startAbortPoller(streamId, abortController, { requestId, - }); - publisher.startKeepalive(); + }) + publisher.startKeepalive() if (chatId) { publisher.publish({ @@ -184,7 +167,7 @@ export function createSSEStream( kind: MothershipStreamV1SessionKind.chat, chatId, }, - }); + }) } fireTitleGeneration({ @@ -198,7 +181,7 @@ export function createSSEStream( requestId, publisher, otelContext, - }); + }) try { const result = await runCopilotLifecycle(requestPayload, { @@ -210,84 +193,73 @@ export function createSSEStream( otelContext, abortSignal: abortController.signal, onEvent: async (event) => { - await publisher.publish(event); + await publisher.publish(event) }, - }); + }) - lifecycleResult = result; + lifecycleResult = result outcome = abortController.signal.aborted ? RequestTraceV1Outcome.cancelled : result.success ? RequestTraceV1Outcome.success - : RequestTraceV1Outcome.error; + : RequestTraceV1Outcome.error await finalizeStream( result, publisher, runId, abortController.signal.aborted, - requestId, - ); + requestId + ) } catch (error) { outcome = abortController.signal.aborted ? RequestTraceV1Outcome.cancelled - : RequestTraceV1Outcome.error; + : RequestTraceV1Outcome.error if (publisher.clientDisconnected) { - logger.info( - `[${requestId}] Stream errored after client disconnect`, - { - error: - error instanceof Error ? error.message : "Stream error", - }, - ); + logger.info(`[${requestId}] Stream errored after client disconnect`, { + error: error instanceof Error ? 
error.message : 'Stream error', + }) } - logger.error( - `[${requestId}] Unexpected orchestration error:`, - error, - ); + logger.error(`[${requestId}] Unexpected orchestration error:`, error) const syntheticResult = { success: false as const, - content: "", + content: '', contentBlocks: [], toolCalls: [], - error: - "An unexpected error occurred while processing the response.", - }; + error: 'An unexpected error occurred while processing the response.', + } await finalizeStream( syntheticResult, publisher, runId, abortController.signal.aborted, - requestId, - ); + requestId + ) } finally { collector.endSpan( requestSpan, outcome === RequestTraceV1Outcome.success - ? "ok" + ? 'ok' : outcome === RequestTraceV1Outcome.cancelled - ? "cancelled" - : "error", - ); + ? 'cancelled' + : 'error' + ) - clearInterval(abortPoller); + clearInterval(abortPoller) try { - await publisher.close(); + await publisher.close() } catch (error) { - logger.warn( - `[${requestId}] Failed to flush stream persistence during close`, - { - error: error instanceof Error ? error.message : String(error), - }, - ); + logger.warn(`[${requestId}] Failed to flush stream persistence during close`, { + error: error instanceof Error ? 
error.message : String(error), + }) } - unregisterActiveStream(streamId); + unregisterActiveStream(streamId) if (chatId) { - await releasePendingChatStream(chatId, streamId); + await releasePendingChatStream(chatId, streamId) } - await scheduleBufferCleanup(streamId); - await scheduleFilePreviewSessionCleanup(streamId); - await cleanupAbortMarker(streamId); + await scheduleBufferCleanup(streamId) + await scheduleFilePreviewSessionCleanup(streamId) + await cleanupAbortMarker(streamId) const trace = collector.build({ outcome, @@ -298,40 +270,39 @@ export function createSSEStream( executionId, usage: lifecycleResult?.usage, cost: lifecycleResult?.cost, - }); - reportTrace(trace, otelContext).catch(() => {}); - rootOutcome = outcome; + }) + reportTrace(trace, otelContext).catch(() => {}) + rootOutcome = outcome if (lifecycleResult?.usage) { activeOtelRoot.span.setAttributes({ - "gen_ai.usage.input_tokens": lifecycleResult.usage.prompt ?? 0, - "gen_ai.usage.output_tokens": - lifecycleResult.usage.completion ?? 0, - }); + 'gen_ai.usage.input_tokens': lifecycleResult.usage.prompt ?? 0, + 'gen_ai.usage.output_tokens': lifecycleResult.usage.completion ?? 0, + }) } if (lifecycleResult?.cost) { activeOtelRoot.span.setAttributes({ - "billing.cost.input_usd": lifecycleResult.cost.input ?? 0, - "billing.cost.output_usd": lifecycleResult.cost.output ?? 0, - "billing.cost.total_usd": lifecycleResult.cost.total ?? 0, - }); + 'billing.cost.input_usd': lifecycleResult.cost.input ?? 0, + 'billing.cost.output_usd': lifecycleResult.cost.output ?? 0, + 'billing.cost.total_usd': lifecycleResult.cost.total ?? 0, + }) } } } catch (error) { - rootOutcome = RequestTraceV1Outcome.error; - rootError = error; - throw error; + rootOutcome = RequestTraceV1Outcome.error + rootError = error + throw error } finally { // `finish` is idempotent, so it's safe whether the POST // handler started the root (and may also call finish on an // error path before the stream ran) or we did. 
- activeOtelRoot.finish(rootOutcome, rootError); + activeOtelRoot.finish(rootOutcome, rootError) } - }); + }) }, cancel() { - publisher.markDisconnected(); + publisher.markDisconnected() }, - }); + }) } // --------------------------------------------------------------------------- @@ -339,16 +310,16 @@ export function createSSEStream( // --------------------------------------------------------------------------- function fireTitleGeneration(params: { - chatId?: string; - currentChat: CurrentChatSummary; - isNewChat: boolean; - message: string; - titleModel: string; - titleProvider?: string; - workspaceId?: string; - requestId: string; - publisher: StreamWriter; - otelContext?: Context; + chatId?: string + currentChat: CurrentChatSummary + isNewChat: boolean + message: string + titleModel: string + titleProvider?: string + workspaceId?: string + requestId: string + publisher: StreamWriter + otelContext?: Context }): void { const { chatId, @@ -361,8 +332,8 @@ function fireTitleGeneration(params: { requestId, publisher, otelContext, - } = params; - if (!chatId || currentChat?.title || !isNewChat) return; + } = params + if (!chatId || currentChat?.title || !isNewChat) return requestChatTitle({ message, @@ -371,26 +342,23 @@ function fireTitleGeneration(params: { otelContext, }) .then(async (title) => { - if (!title) return; - await db - .update(copilotChats) - .set({ title }) - .where(eq(copilotChats.id, chatId)); + if (!title) return + await db.update(copilotChats).set({ title }).where(eq(copilotChats.id, chatId)) await publisher.publish({ type: MothershipStreamV1EventType.session, payload: { kind: MothershipStreamV1SessionKind.title, title }, - }); + }) if (workspaceId) { taskPubSub?.publishStatusChanged({ workspaceId, chatId, - type: "renamed", - }); + type: 'renamed', + }) } }) .catch((error) => { - logger.error(`[${requestId}] Title generation failed:`, error); - }); + logger.error(`[${requestId}] Title generation failed:`, error) + }) } // 
--------------------------------------------------------------------------- @@ -398,57 +366,53 @@ function fireTitleGeneration(params: { // --------------------------------------------------------------------------- export async function requestChatTitle(params: { - message: string; - model: string; - provider?: string; - otelContext?: Context; + message: string + model: string + provider?: string + otelContext?: Context }): Promise { - const { message, model, provider, otelContext } = params; - if (!message || !model) return null; + const { message, model, provider, otelContext } = params + if (!message || !model) return null const headers: Record = { - "Content-Type": "application/json", - }; + 'Content-Type': 'application/json', + } if (env.COPILOT_API_KEY) { - headers["x-api-key"] = env.COPILOT_API_KEY; + headers['x-api-key'] = env.COPILOT_API_KEY } try { - const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); - const response = await fetchGo( - `${SIM_AGENT_API_URL}/api/generate-chat-title`, - { - method: "POST", - headers, - body: JSON.stringify({ - message, - model, - ...(provider ? { provider } : {}), - }), - otelContext, - spanName: "sim → go /api/generate-chat-title", - operation: "generate_chat_title", - attributes: { - "gen_ai.request.model": model, - ...(provider ? { "gen_ai.system": provider } : {}), - }, + const { fetchGo } = await import('@/lib/copilot/request/go/fetch') + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/generate-chat-title`, { + method: 'POST', + headers, + body: JSON.stringify({ + message, + model, + ...(provider ? { provider } : {}), + }), + otelContext, + spanName: 'sim → go /api/generate-chat-title', + operation: 'generate_chat_title', + attributes: { + 'gen_ai.request.model': model, + ...(provider ? 
{ 'gen_ai.system': provider } : {}), }, - ); + }) - const payload = await response.json().catch(() => ({})); + const payload = await response.json().catch(() => ({})) if (!response.ok) { - logger.warn("Failed to generate chat title via copilot backend", { + logger.warn('Failed to generate chat title via copilot backend', { status: response.status, error: payload, - }); - return null; + }) + return null } - const title = - typeof payload?.title === "string" ? payload.title.trim() : ""; - return title || null; + const title = typeof payload?.title === 'string' ? payload.title.trim() : '' + return title || null } catch (error) { - logger.error("Error generating chat title:", error); - return null; + logger.error('Error generating chat title:', error) + return null } } diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index 15e2425c001..48d13784179 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -1,18 +1,18 @@ -import { randomBytes } from "crypto"; +import { randomBytes } from 'crypto' import { + type Context, context, ROOT_CONTEXT, + type Span, + type SpanContext, SpanKind, SpanStatusCode, TraceFlags, trace, - type Context, - type Span, - type SpanContext, -} from "@opentelemetry/api"; -import type { RequestTraceV1Outcome } from "@/lib/copilot/generated/request-trace-v1"; -import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; -import { contextFromRequestHeaders } from "@/lib/copilot/request/go/propagation"; +} from '@opentelemetry/api' +import type { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' /** * Reuse the generated RequestTraceV1Outcome string values for every @@ -22,7 +22,7 @@ import { contextFromRequestHeaders } from "@/lib/copilot/request/go/propagation" * without scattering the literals 
through the codebase. */ export type CopilotLifecycleOutcome = - (typeof RequestTraceV1Outcome)[keyof typeof RequestTraceV1Outcome]; + (typeof RequestTraceV1Outcome)[keyof typeof RequestTraceV1Outcome] /** * Resolve the tracer lazily on every call. With Next.js 16 + Turbopack dev @@ -34,11 +34,11 @@ export type CopilotLifecycleOutcome = * per request ensures we always pick up the currently-registered provider. */ export function getCopilotTracer() { - return trace.getTracer("sim-ai-platform", "1.0.0"); + return trace.getTracer('sim-ai-platform', '1.0.0') } function getTracer() { - return getCopilotTracer(); + return getCopilotTracer() } /** @@ -59,33 +59,31 @@ export async function withIncomingGoSpan( headers: Headers, spanName: string, attributes: Record | undefined, - fn: (span: Span) => Promise, + fn: (span: Span) => Promise ): Promise { - const parentContext = contextFromRequestHeaders(headers); - const tracer = getTracer(); + const parentContext = contextFromRequestHeaders(headers) + const tracer = getTracer() return tracer.startActiveSpan( spanName, { kind: SpanKind.SERVER, attributes }, parentContext, async (span) => { try { - const result = await fn(span); - span.setStatus({ code: SpanStatusCode.OK }); - return result; + const result = await fn(span) + span.setStatus({ code: SpanStatusCode.OK }) + return result } catch (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - throw error; + }) + span.recordException(error instanceof Error ? error : new Error(String(error))) + throw error } finally { - span.end(); + span.end() } - }, - ); + } + ) } /** @@ -114,36 +112,29 @@ export async function withCopilotSpan( * framework span is currently active (which then gets dropped by our * sampler, stranding this span in the trace). 
*/ - parentContext?: Context, + parentContext?: Context ): Promise { - const tracer = getTracer(); + const tracer = getTracer() const runBody = async (span: Span) => { try { - const result = await fn(span); - span.setStatus({ code: SpanStatusCode.OK }); - return result; + const result = await fn(span) + span.setStatus({ code: SpanStatusCode.OK }) + return result } catch (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - throw error; + }) + span.recordException(error instanceof Error ? error : new Error(String(error))) + throw error } finally { - span.end(); + span.end() } - }; + } if (parentContext) { - return tracer.startActiveSpan( - spanName, - { attributes }, - parentContext, - runBody, - ); + return tracer.startActiveSpan(spanName, { attributes }, parentContext, runBody) } - return tracer.startActiveSpan(spanName, { attributes }, runBody); + return tracer.startActiveSpan(spanName, { attributes }, runBody) } /** @@ -154,78 +145,74 @@ export async function withCopilotSpan( */ export async function withCopilotToolSpan( input: { - toolName: string; - toolCallId: string; - runId?: string; - chatId?: string; - argsBytes?: number; - argsPreview?: string; + toolName: string + toolCallId: string + runId?: string + chatId?: string + argsBytes?: number + argsPreview?: string }, - fn: (span: Span) => Promise, + fn: (span: Span) => Promise ): Promise { - const tracer = getTracer(); + const tracer = getTracer() return tracer.startActiveSpan( `tool.execute ${input.toolName}`, { attributes: { - "tool.name": input.toolName, - "tool.call_id": input.toolCallId, - "tool.executor": "sim", - ...(input.runId ? { "run.id": input.runId } : {}), - ...(input.chatId ? { "chat.id": input.chatId } : {}), - ...(typeof input.argsBytes === "number" - ? { "tool.args.bytes": input.argsBytes } - : {}), - ...(input.argsPreview ? 
{ "tool.args.preview": input.argsPreview } : {}), + 'tool.name': input.toolName, + 'tool.call_id': input.toolCallId, + 'tool.executor': 'sim', + ...(input.runId ? { 'run.id': input.runId } : {}), + ...(input.chatId ? { 'chat.id': input.chatId } : {}), + ...(typeof input.argsBytes === 'number' ? { 'tool.args.bytes': input.argsBytes } : {}), + ...(input.argsPreview ? { 'tool.args.preview': input.argsPreview } : {}), }, }, async (span) => { try { - const result = await fn(span); - span.setStatus({ code: SpanStatusCode.OK }); - return result; + const result = await fn(span) + span.setStatus({ code: SpanStatusCode.OK }) + return result } catch (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - throw error; + }) + span.recordException(error instanceof Error ? error : new Error(String(error))) + throw error } finally { - span.end(); + span.end() } - }, - ); + } + ) } function isValidSpanContext(spanContext: SpanContext): boolean { return ( /^[0-9a-f]{32}$/.test(spanContext.traceId) && - spanContext.traceId !== "00000000000000000000000000000000" && + spanContext.traceId !== '00000000000000000000000000000000' && /^[0-9a-f]{16}$/.test(spanContext.spanId) && - spanContext.spanId !== "0000000000000000" - ); + spanContext.spanId !== '0000000000000000' + ) } function createFallbackSpanContext(): SpanContext { return { - traceId: randomBytes(16).toString("hex"), - spanId: randomBytes(8).toString("hex"), + traceId: randomBytes(16).toString('hex'), + spanId: randomBytes(8).toString('hex'), traceFlags: TraceFlags.SAMPLED, - }; + } } export interface CopilotOtelScope { - requestId: string; - route?: string; - chatId?: string; - workflowId?: string; - executionId?: string; - runId?: string; - streamId?: string; - transport: "headless" | "stream"; + requestId: string + route?: string + chatId?: string + workflowId?: 
string + executionId?: string + runId?: string + streamId?: string + transport: 'headless' | 'stream' } /** @@ -235,24 +222,22 @@ export interface CopilotOtelScope { * span to outlive the synchronous handler body — e.g. SSE routes). */ function buildAgentSpanAttributes( - scope: CopilotOtelScope, + scope: CopilotOtelScope ): Record { return { - "gen_ai.agent.name": "mothership", - "gen_ai.agent.id": - scope.transport === "stream" ? "mothership-stream" : "mothership-headless", - "gen_ai.operation.name": - scope.transport === "stream" ? "chat" : "invoke_agent", - "request.id": scope.requestId, - "sim.request_id": scope.requestId, - "copilot.route": scope.route ?? "", - "copilot.transport": scope.transport, - ...(scope.chatId ? { "chat.id": scope.chatId } : {}), - ...(scope.workflowId ? { "workflow.id": scope.workflowId } : {}), - ...(scope.executionId ? { "workflow.execution_id": scope.executionId } : {}), - ...(scope.runId ? { "run.id": scope.runId } : {}), - ...(scope.streamId ? { "stream.id": scope.streamId } : {}), - }; + 'gen_ai.agent.name': 'mothership', + 'gen_ai.agent.id': scope.transport === 'stream' ? 'mothership-stream' : 'mothership-headless', + 'gen_ai.operation.name': scope.transport === 'stream' ? 'chat' : 'invoke_agent', + 'request.id': scope.requestId, + 'sim.request_id': scope.requestId, + 'copilot.route': scope.route ?? '', + 'copilot.transport': scope.transport, + ...(scope.chatId ? { 'chat.id': scope.chatId } : {}), + ...(scope.workflowId ? { 'workflow.id': scope.workflowId } : {}), + ...(scope.executionId ? { 'workflow.execution_id': scope.executionId } : {}), + ...(scope.runId ? { 'run.id': scope.runId } : {}), + ...(scope.streamId ? { 'stream.id': scope.streamId } : {}), + } } /** @@ -274,9 +259,9 @@ function buildAgentSpanAttributes( * you. 
*/ export interface CopilotOtelRoot { - span: Span; - context: Context; - finish: (outcome?: CopilotLifecycleOutcome, error?: unknown) => void; + span: Span + context: Context + finish: (outcome?: CopilotLifecycleOutcome, error?: unknown) => void } export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { @@ -287,71 +272,67 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { // warning) and any descendant whose AsyncLocalStorage propagation was // disrupted would inherit the same dropped parent. Starting from // ROOT_CONTEXT gives the mothership lifecycle its own clean trace tree. - const parentContext = ROOT_CONTEXT; + const parentContext = ROOT_CONTEXT const span = getTracer().startSpan( TraceSpan.GenAiAgentExecute, { attributes: buildAgentSpanAttributes(scope) }, - parentContext, - ); + parentContext + ) const carrierSpan = isValidSpanContext(span.spanContext()) ? span - : trace.wrapSpanContext(createFallbackSpanContext()); - const rootContext = trace.setSpan(parentContext, carrierSpan); + : trace.wrapSpanContext(createFallbackSpanContext()) + const rootContext = trace.setSpan(parentContext, carrierSpan) - let finished = false; - const finish: CopilotOtelRoot["finish"] = (outcome = "success", error) => { - if (finished) return; - finished = true; - span.setAttribute("copilot.request.outcome", outcome); + let finished = false + const finish: CopilotOtelRoot['finish'] = (outcome, error) => { + if (finished) return + finished = true + span.setAttribute('copilot.request.outcome', outcome) if (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - } else if (outcome === "success") { - span.setStatus({ code: SpanStatusCode.OK }); + }) + span.recordException(error instanceof Error ? 
error : new Error(String(error))) + } else if (outcome === 'success') { + span.setStatus({ code: SpanStatusCode.OK }) } - span.end(); - }; + span.end() + } - return { span, context: rootContext, finish }; + return { span, context: rootContext, finish } } export async function withCopilotOtelContext( scope: CopilotOtelScope, - fn: (otelContext: Context) => Promise, + fn: (otelContext: Context) => Promise ): Promise { - const parentContext = context.active(); + const parentContext = context.active() const span = getTracer().startSpan( TraceSpan.GenAiAgentExecute, { attributes: buildAgentSpanAttributes(scope) }, - parentContext, - ); + parentContext + ) const carrierSpan = isValidSpanContext(span.spanContext()) ? span - : trace.wrapSpanContext(createFallbackSpanContext()); - const otelContext = trace.setSpan(parentContext, carrierSpan); - let sawError = false; + : trace.wrapSpanContext(createFallbackSpanContext()) + const otelContext = trace.setSpan(parentContext, carrierSpan) + let sawError = false try { - return await context.with(otelContext, () => fn(otelContext)); + return await context.with(otelContext, () => fn(otelContext)) } catch (error) { - sawError = true; + sawError = true span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), - }); - span.recordException( - error instanceof Error ? error : new Error(String(error)), - ); - throw error; + }) + span.recordException(error instanceof Error ? 
error : new Error(String(error))) + throw error } finally { if (!sawError) { - span.setStatus({ code: SpanStatusCode.OK }); + span.setStatus({ code: SpanStatusCode.OK }) } - span.end(); + span.end() } } diff --git a/apps/sim/lib/copilot/request/session/contract.test.ts b/apps/sim/lib/copilot/request/session/contract.test.ts index 37724d874e4..3d50fdd64eb 100644 --- a/apps/sim/lib/copilot/request/session/contract.test.ts +++ b/apps/sim/lib/copilot/request/session/contract.test.ts @@ -2,215 +2,215 @@ * @vitest-environment node */ -import { describe, expect, it } from "vitest"; +import { describe, expect, it } from 'vitest' import { isContractStreamEventEnvelope, isSyntheticFilePreviewEventEnvelope, parsePersistedStreamEventEnvelope, parsePersistedStreamEventEnvelopeJson, -} from "./contract"; +} from './contract' const BASE_ENVELOPE = { v: 1 as const, seq: 1, - ts: "2026-04-11T00:00:00.000Z", + ts: '2026-04-11T00:00:00.000Z', stream: { - streamId: "stream-1", - cursor: "1", + streamId: 'stream-1', + cursor: '1', }, trace: { - requestId: "req-1", + requestId: 'req-1', }, -}; +} -describe("stream session contract parser", () => { - it("accepts contract text events", () => { +describe('stream session contract parser', () => { + it('accepts contract text events', () => { const event = { ...BASE_ENVELOPE, trace: { ...BASE_ENVELOPE.trace, - goTraceId: "go-trace-1", + goTraceId: 'go-trace-1', }, - type: "text" as const, + type: 'text' as const, payload: { - channel: "assistant" as const, - text: "hello", + channel: 'assistant' as const, + text: 'hello', }, - }; + } - expect(isContractStreamEventEnvelope(event)).toBe(true); + expect(isContractStreamEventEnvelope(event)).toBe(true) - const parsed = parsePersistedStreamEventEnvelope(event); + const parsed = parsePersistedStreamEventEnvelope(event) expect(parsed).toEqual({ ok: true, event, - }); - }); + }) + }) - it("accepts contract session chat events", () => { + it('accepts contract session chat events', () => { const event 
= { ...BASE_ENVELOPE, - type: "session" as const, - payload: { kind: "chat" as const, chatId: "chat-1" }, - }; + type: 'session' as const, + payload: { kind: 'chat' as const, chatId: 'chat-1' }, + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract complete events", () => { + it('accepts contract complete events', () => { const event = { ...BASE_ENVELOPE, - type: "complete" as const, - payload: { status: "complete" as const }, - }; + type: 'complete' as const, + payload: { status: 'complete' as const }, + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract error events", () => { + it('accepts contract error events', () => { const event = { ...BASE_ENVELOPE, - type: "error" as const, - payload: { message: "something went wrong" }, - }; + type: 'error' as const, + payload: { message: 'something went wrong' }, + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract tool call events", () => { + it('accepts contract tool call events', () => { const event = { ...BASE_ENVELOPE, - type: "tool" as const, + type: 'tool' as const, payload: { - toolCallId: "tc-1", - toolName: "read", - phase: "call" as const, - executor: "sim" as const, - mode: "sync" as const, + toolCallId: 'tc-1', + toolName: 'read', + phase: 'call' as const, + executor: 'sim' as const, + mode: 'sync' as const, }, - }; + } - 
expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract span events", () => { + it('accepts contract span events', () => { const event = { ...BASE_ENVELOPE, - type: "span" as const, + type: 'span' as const, payload: { - kind: "subagent" as const, - event: "start" as const, - agent: "file", + kind: 'subagent' as const, + event: 'start' as const, + agent: 'file', }, - }; + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract resource events", () => { + it('accepts contract resource events', () => { const event = { ...BASE_ENVELOPE, - type: "resource" as const, + type: 'resource' as const, payload: { - op: "upsert" as const, - resource: { id: "r-1", type: "file", title: "test.md" }, + op: 'upsert' as const, + resource: { id: 'r-1', type: 'file', title: 'test.md' }, }, - }; + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts contract run events", () => { + it('accepts contract run events', () => { const event = { ...BASE_ENVELOPE, - type: "run" as const, - payload: { kind: "compaction_start" as const }, - }; + type: 'run' as const, + payload: { kind: 'compaction_start' as const }, + } - expect(isContractStreamEventEnvelope(event)).toBe(true); - expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true); - }); + expect(isContractStreamEventEnvelope(event)).toBe(true) + 
expect(parsePersistedStreamEventEnvelope(event).ok).toBe(true) + }) - it("accepts synthetic file preview events", () => { + it('accepts synthetic file preview events', () => { const event = { ...BASE_ENVELOPE, - type: "tool" as const, + type: 'tool' as const, payload: { - toolCallId: "preview-1", - toolName: "workspace_file" as const, - previewPhase: "file_preview_content" as const, - content: "draft body", - contentMode: "snapshot" as const, + toolCallId: 'preview-1', + toolName: 'workspace_file' as const, + previewPhase: 'file_preview_content' as const, + content: 'draft body', + contentMode: 'snapshot' as const, previewVersion: 2, - fileName: "draft.md", + fileName: 'draft.md', }, - }; + } - expect(isSyntheticFilePreviewEventEnvelope(event)).toBe(true); + expect(isSyntheticFilePreviewEventEnvelope(event)).toBe(true) - const parsed = parsePersistedStreamEventEnvelope(event); + const parsed = parsePersistedStreamEventEnvelope(event) expect(parsed).toEqual({ ok: true, event, - }); - }); + }) + }) - it("rejects invalid tool events with structured validation errors", () => { + it('rejects invalid tool events with structured validation errors', () => { const parsed = parsePersistedStreamEventEnvelope({ ...BASE_ENVELOPE, - type: "tool", + type: 'tool', payload: { - toolCallId: "tool-1", - toolName: "read", + toolCallId: 'tool-1', + toolName: 'read', }, - }); + }) - expect(parsed.ok).toBe(false); + expect(parsed.ok).toBe(false) if (parsed.ok) { - throw new Error("expected invalid result"); + throw new Error('expected invalid result') } - expect(parsed.reason).toBe("invalid_stream_event"); - }); + expect(parsed.reason).toBe('invalid_stream_event') + }) - it("rejects unknown event types", () => { + it('rejects unknown event types', () => { const parsed = parsePersistedStreamEventEnvelope({ ...BASE_ENVELOPE, - type: "unknown_type", + type: 'unknown_type', payload: {}, - }); + }) - expect(parsed.ok).toBe(false); + expect(parsed.ok).toBe(false) if (parsed.ok) { - throw new 
Error("expected invalid result"); + throw new Error('expected invalid result') } - expect(parsed.reason).toBe("invalid_stream_event"); - expect(parsed.errors).toContain('unknown type="unknown_type"'); - }); + expect(parsed.reason).toBe('invalid_stream_event') + expect(parsed.errors).toContain('unknown type="unknown_type"') + }) - it("rejects non-object values", () => { - const parsed = parsePersistedStreamEventEnvelope("not an object"); + it('rejects non-object values', () => { + const parsed = parsePersistedStreamEventEnvelope('not an object') - expect(parsed.ok).toBe(false); + expect(parsed.ok).toBe(false) if (parsed.ok) { - throw new Error("expected invalid result"); + throw new Error('expected invalid result') } - expect(parsed.reason).toBe("invalid_stream_event"); - expect(parsed.errors).toContain("value is not an object"); - }); + expect(parsed.reason).toBe('invalid_stream_event') + expect(parsed.errors).toContain('value is not an object') + }) - it("reports invalid JSON separately from schema failures", () => { - const parsed = parsePersistedStreamEventEnvelopeJson("{"); + it('reports invalid JSON separately from schema failures', () => { + const parsed = parsePersistedStreamEventEnvelopeJson('{') - expect(parsed.ok).toBe(false); + expect(parsed.ok).toBe(false) if (parsed.ok) { - throw new Error("expected invalid json result"); + throw new Error('expected invalid json result') } - expect(parsed.reason).toBe("invalid_json"); - }); -}); + expect(parsed.reason).toBe('invalid_json') + }) +}) diff --git a/apps/sim/lib/copilot/request/session/contract.ts b/apps/sim/lib/copilot/request/session/contract.ts index 556e07e9261..7953a11c956 100644 --- a/apps/sim/lib/copilot/request/session/contract.ts +++ b/apps/sim/lib/copilot/request/session/contract.ts @@ -3,7 +3,7 @@ import type { MothershipStreamV1StreamRef, MothershipStreamV1StreamScope, MothershipStreamV1Trace, -} from "@/lib/copilot/generated/mothership-stream-v1"; +} from 
'@/lib/copilot/generated/mothership-stream-v1' import { MothershipStreamV1EventType, MothershipStreamV1ResourceOp, @@ -12,79 +12,78 @@ import { MothershipStreamV1SpanPayloadKind, MothershipStreamV1TextChannel, MothershipStreamV1ToolPhase, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import type { FilePreviewTargetKind } from "./file-preview-session-contract"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import type { FilePreviewTargetKind } from './file-preview-session-contract' -type JsonRecord = Record; +type JsonRecord = Record const FILE_PREVIEW_PHASE = { - start: "file_preview_start", - target: "file_preview_target", - editMeta: "file_preview_edit_meta", - content: "file_preview_content", - complete: "file_preview_complete", -} as const; + start: 'file_preview_start', + target: 'file_preview_target', + editMeta: 'file_preview_edit_meta', + content: 'file_preview_content', + complete: 'file_preview_complete', +} as const type EnvelopeToStreamEvent = T extends { - type: infer TType; - payload: infer TPayload; - scope?: infer TScope; + type: infer TType + payload: infer TPayload + scope?: infer TScope } ? 
{ type: TType; payload: TPayload; scope?: Exclude } - : never; + : never -export type SyntheticFilePreviewPhase = - (typeof FILE_PREVIEW_PHASE)[keyof typeof FILE_PREVIEW_PHASE]; +export type SyntheticFilePreviewPhase = (typeof FILE_PREVIEW_PHASE)[keyof typeof FILE_PREVIEW_PHASE] export interface SyntheticFilePreviewTarget { - kind: FilePreviewTargetKind; - fileId?: string; - fileName?: string; + kind: FilePreviewTargetKind + fileId?: string + fileName?: string } export interface SyntheticFilePreviewStartPayload { - previewPhase: typeof FILE_PREVIEW_PHASE.start; - toolCallId: string; - toolName: "workspace_file"; + previewPhase: typeof FILE_PREVIEW_PHASE.start + toolCallId: string + toolName: 'workspace_file' } export interface SyntheticFilePreviewTargetPayload { - operation?: string; - previewPhase: typeof FILE_PREVIEW_PHASE.target; - target: SyntheticFilePreviewTarget; - title?: string; - toolCallId: string; - toolName: "workspace_file"; + operation?: string + previewPhase: typeof FILE_PREVIEW_PHASE.target + target: SyntheticFilePreviewTarget + title?: string + toolCallId: string + toolName: 'workspace_file' } export interface SyntheticFilePreviewEditMetaPayload { - edit: JsonRecord; - previewPhase: typeof FILE_PREVIEW_PHASE.editMeta; - toolCallId: string; - toolName: "workspace_file"; + edit: JsonRecord + previewPhase: typeof FILE_PREVIEW_PHASE.editMeta + toolCallId: string + toolName: 'workspace_file' } export interface SyntheticFilePreviewContentPayload { - content: string; - contentMode: "delta" | "snapshot"; - edit?: JsonRecord; - fileId?: string; - fileName: string; - operation?: string; - previewPhase: typeof FILE_PREVIEW_PHASE.content; - previewVersion: number; - targetKind?: string; - toolCallId: string; - toolName: "workspace_file"; + content: string + contentMode: 'delta' | 'snapshot' + edit?: JsonRecord + fileId?: string + fileName: string + operation?: string + previewPhase: typeof FILE_PREVIEW_PHASE.content + previewVersion: number + targetKind?: 
string + toolCallId: string + toolName: 'workspace_file' } export interface SyntheticFilePreviewCompletePayload { - fileId?: string; - output?: unknown; - previewPhase: typeof FILE_PREVIEW_PHASE.complete; - previewVersion?: number; - toolCallId: string; - toolName: "workspace_file"; + fileId?: string + output?: unknown + previewPhase: typeof FILE_PREVIEW_PHASE.complete + previewVersion?: number + toolCallId: string + toolName: 'workspace_file' } export type SyntheticFilePreviewPayload = @@ -92,105 +91,101 @@ export type SyntheticFilePreviewPayload = | SyntheticFilePreviewTargetPayload | SyntheticFilePreviewEditMetaPayload | SyntheticFilePreviewContentPayload - | SyntheticFilePreviewCompletePayload; + | SyntheticFilePreviewCompletePayload export interface SyntheticFilePreviewEventEnvelope { - payload: SyntheticFilePreviewPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: SyntheticFilePreviewPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export type PersistedStreamEventEnvelope = | MothershipStreamV1EventEnvelope - | SyntheticFilePreviewEventEnvelope; - -export type ContractStreamEvent = - EnvelopeToStreamEvent; -export type SyntheticStreamEvent = - EnvelopeToStreamEvent; -export type SessionStreamEvent = ContractStreamEvent | SyntheticStreamEvent; -export type StreamEvent = SessionStreamEvent; + | SyntheticFilePreviewEventEnvelope + +export type ContractStreamEvent = EnvelopeToStreamEvent +export type SyntheticStreamEvent = EnvelopeToStreamEvent +export type SessionStreamEvent = ContractStreamEvent | SyntheticStreamEvent +export type StreamEvent = SessionStreamEvent export type ToolCallStreamEvent = Extract< ContractStreamEvent, - { type: "tool"; payload: { phase: "call" } } ->; + { type: 'tool'; payload: { 
phase: 'call' } } +> export type ToolArgsDeltaStreamEvent = Extract< ContractStreamEvent, - { type: "tool"; payload: { phase: "args_delta" } } ->; + { type: 'tool'; payload: { phase: 'args_delta' } } +> export type ToolResultStreamEvent = Extract< ContractStreamEvent, - { type: "tool"; payload: { phase: "result" } } ->; + { type: 'tool'; payload: { phase: 'result' } } +> export type SubagentSpanStreamEvent = Extract< ContractStreamEvent, - { type: "span"; payload: { kind: "subagent" } } ->; + { type: 'span'; payload: { kind: 'subagent' } } +> export interface ParseStreamEventEnvelopeSuccess { - ok: true; - event: PersistedStreamEventEnvelope; + ok: true + event: PersistedStreamEventEnvelope } export interface ParseStreamEventEnvelopeFailure { - errors?: string[]; - message: string; - ok: false; - reason: "invalid_json" | "invalid_stream_event"; + errors?: string[] + message: string + ok: false + reason: 'invalid_json' | 'invalid_stream_event' } export type ParseStreamEventEnvelopeResult = | ParseStreamEventEnvelopeSuccess - | ParseStreamEventEnvelopeFailure; + | ParseStreamEventEnvelopeFailure // --------------------------------------------------------------------------- // Structural helpers (CSP-safe – no codegen / eval / new Function) // --------------------------------------------------------------------------- function isRecord(value: unknown): value is JsonRecord { - return Boolean(value) && typeof value === "object" && !Array.isArray(value); + return Boolean(value) && typeof value === 'object' && !Array.isArray(value) } function isOptionalString(value: unknown): value is string | undefined { - return value === undefined || typeof value === "string"; + return value === undefined || typeof value === 'string' } function isOptionalFiniteNumber(value: unknown): value is number | undefined { - return ( - value === undefined || (typeof value === "number" && Number.isFinite(value)) - ); + return value === undefined || (typeof value === 'number' && 
Number.isFinite(value)) } function isStreamRef(value: unknown): value is MothershipStreamV1StreamRef { return ( isRecord(value) && - typeof value.streamId === "string" && + typeof value.streamId === 'string' && isOptionalString(value.chatId) && isOptionalString(value.cursor) - ); + ) } function isTrace(value: unknown): value is MothershipStreamV1Trace { return ( isRecord(value) && - typeof value.requestId === "string" && + typeof value.requestId === 'string' && isOptionalString(value.goTraceId) && isOptionalString(value.spanId) - ); + ) } function isStreamScope(value: unknown): value is MothershipStreamV1StreamScope { return ( isRecord(value) && - value.lane === "subagent" && + value.lane === 'subagent' && isOptionalString(value.agentId) && isOptionalString(value.parentToolCallId) - ); + ) } // --------------------------------------------------------------------------- @@ -203,46 +198,42 @@ function isStreamScope(value: unknown): value is MothershipStreamV1StreamScope { // structural checking to safely dispatch inside the switch statement. 
// --------------------------------------------------------------------------- -const KNOWN_EVENT_TYPES: ReadonlySet = new Set( - Object.values(MothershipStreamV1EventType), -); +const KNOWN_EVENT_TYPES: ReadonlySet = new Set(Object.values(MothershipStreamV1EventType)) function isValidEnvelopeShell(value: unknown): value is JsonRecord & { - v: 1; - seq: number; - ts: string; - stream: MothershipStreamV1StreamRef; - type: string; - payload: JsonRecord; + v: 1 + seq: number + ts: string + stream: MothershipStreamV1StreamRef + type: string + payload: JsonRecord } { - if (!isRecord(value)) return false; - if (value.v !== 1) return false; - if (typeof value.seq !== "number" || !Number.isFinite(value.seq)) - return false; - if (typeof value.ts !== "string") return false; - if (!isStreamRef(value.stream)) return false; - if (value.trace !== undefined && !isTrace(value.trace)) return false; - if (value.scope !== undefined && !isStreamScope(value.scope)) return false; - if (typeof value.type !== "string" || !KNOWN_EVENT_TYPES.has(value.type)) - return false; - if (!isRecord(value.payload)) return false; - return true; + if (!isRecord(value)) return false + if (value.v !== 1) return false + if (typeof value.seq !== 'number' || !Number.isFinite(value.seq)) return false + if (typeof value.ts !== 'string') return false + if (!isStreamRef(value.stream)) return false + if (value.trace !== undefined && !isTrace(value.trace)) return false + if (value.scope !== undefined && !isStreamScope(value.scope)) return false + if (typeof value.type !== 'string' || !KNOWN_EVENT_TYPES.has(value.type)) return false + if (!isRecord(value.payload)) return false + return true } function isValidSessionPayload(payload: JsonRecord): boolean { - const kind = payload.kind; - if (typeof kind !== "string") return false; + const kind = payload.kind + if (typeof kind !== 'string') return false switch (kind) { case MothershipStreamV1SessionKind.start: - return true; + return true case 
MothershipStreamV1SessionKind.chat: - return typeof payload.chatId === "string"; + return typeof payload.chatId === 'string' case MothershipStreamV1SessionKind.title: - return typeof payload.title === "string"; + return typeof payload.title === 'string' case MothershipStreamV1SessionKind.trace: - return typeof payload.requestId === "string"; + return typeof payload.requestId === 'string' default: - return false; + return false } } @@ -250,28 +241,28 @@ function isValidTextPayload(payload: JsonRecord): boolean { return ( (payload.channel === MothershipStreamV1TextChannel.assistant || payload.channel === MothershipStreamV1TextChannel.thinking) && - typeof payload.text === "string" - ); + typeof payload.text === 'string' + ) } function isValidToolPayload(payload: JsonRecord): boolean { - if (typeof payload.toolCallId !== "string") return false; - if (typeof payload.toolName !== "string") return false; - const phase = payload.phase; + if (typeof payload.toolCallId !== 'string') return false + if (typeof payload.toolName !== 'string') return false + const phase = payload.phase return ( phase === MothershipStreamV1ToolPhase.call || phase === MothershipStreamV1ToolPhase.args_delta || phase === MothershipStreamV1ToolPhase.result - ); + ) } function isValidSpanPayload(payload: JsonRecord): boolean { - const kind = payload.kind; + const kind = payload.kind return ( kind === MothershipStreamV1SpanPayloadKind.subagent || kind === MothershipStreamV1SpanPayloadKind.structured_result || kind === MothershipStreamV1SpanPayloadKind.subagent_result - ); + ) } function isValidResourcePayload(payload: JsonRecord): boolean { @@ -279,55 +270,51 @@ function isValidResourcePayload(payload: JsonRecord): boolean { (payload.op === MothershipStreamV1ResourceOp.upsert || payload.op === MothershipStreamV1ResourceOp.remove) && isRecord(payload.resource) && - typeof (payload.resource as JsonRecord).id === "string" && - typeof (payload.resource as JsonRecord).type === "string" - ); + typeof 
(payload.resource as JsonRecord).id === 'string' && + typeof (payload.resource as JsonRecord).type === 'string' + ) } function isValidRunPayload(payload: JsonRecord): boolean { - const kind = payload.kind; + const kind = payload.kind return ( kind === MothershipStreamV1RunKind.checkpoint_pause || kind === MothershipStreamV1RunKind.resumed || kind === MothershipStreamV1RunKind.compaction_start || kind === MothershipStreamV1RunKind.compaction_done - ); + ) } function isValidErrorPayload(payload: JsonRecord): boolean { - return ( - typeof payload.message === "string" || typeof payload.error === "string" - ); + return typeof payload.message === 'string' || typeof payload.error === 'string' } function isValidCompletePayload(payload: JsonRecord): boolean { - return typeof payload.status === "string"; + return typeof payload.status === 'string' } -function isContractEnvelope( - value: unknown, -): value is MothershipStreamV1EventEnvelope { - if (!isValidEnvelopeShell(value)) return false; - const payload = value.payload as JsonRecord; +function isContractEnvelope(value: unknown): value is MothershipStreamV1EventEnvelope { + if (!isValidEnvelopeShell(value)) return false + const payload = value.payload as JsonRecord switch (value.type) { case MothershipStreamV1EventType.session: - return isValidSessionPayload(payload); + return isValidSessionPayload(payload) case MothershipStreamV1EventType.text: - return isValidTextPayload(payload); + return isValidTextPayload(payload) case MothershipStreamV1EventType.tool: - return isValidToolPayload(payload); + return isValidToolPayload(payload) case MothershipStreamV1EventType.span: - return isValidSpanPayload(payload); + return isValidSpanPayload(payload) case MothershipStreamV1EventType.resource: - return isValidResourcePayload(payload); + return isValidResourcePayload(payload) case MothershipStreamV1EventType.run: - return isValidRunPayload(payload); + return isValidRunPayload(payload) case MothershipStreamV1EventType.error: - 
return isValidErrorPayload(payload); + return isValidErrorPayload(payload) case MothershipStreamV1EventType.complete: - return isValidCompletePayload(payload); + return isValidCompletePayload(payload) default: - return false; + return false } } @@ -335,133 +322,101 @@ function isContractEnvelope( // Synthetic file-preview envelope validators // --------------------------------------------------------------------------- -function isSyntheticEnvelopeBase( - value: unknown, -): value is Omit & { - payload?: unknown; +function isSyntheticEnvelopeBase(value: unknown): value is Omit< + SyntheticFilePreviewEventEnvelope, + 'payload' +> & { + payload?: unknown } { return ( isRecord(value) && value.v === 1 && - value.type === "tool" && - typeof value.seq === "number" && + value.type === 'tool' && + typeof value.seq === 'number' && Number.isFinite(value.seq) && - typeof value.ts === "string" && + typeof value.ts === 'string' && isStreamRef(value.stream) && (value.trace === undefined || isTrace(value.trace)) && (value.scope === undefined || isStreamScope(value.scope)) - ); + ) } -function isSyntheticFilePreviewTarget( - value: unknown, -): value is SyntheticFilePreviewTarget { +function isSyntheticFilePreviewTarget(value: unknown): value is SyntheticFilePreviewTarget { return ( isRecord(value) && - (value.kind === "new_file" || value.kind === "file_id") && + (value.kind === 'new_file' || value.kind === 'file_id') && isOptionalString(value.fileId) && isOptionalString(value.fileName) - ); + ) } -function isSyntheticFilePreviewPayload( - value: unknown, -): value is SyntheticFilePreviewPayload { +function isSyntheticFilePreviewPayload(value: unknown): value is SyntheticFilePreviewPayload { if (!isRecord(value)) { - return false; + return false } - if ( - typeof value.toolCallId !== "string" || - value.toolName !== "workspace_file" - ) { - return false; + if (typeof value.toolCallId !== 'string' || value.toolName !== 'workspace_file') { + return false } switch 
(value.previewPhase) { case FILE_PREVIEW_PHASE.start: - return true; + return true case FILE_PREVIEW_PHASE.target: return ( isSyntheticFilePreviewTarget(value.target) && isOptionalString(value.operation) && isOptionalString(value.title) - ); + ) case FILE_PREVIEW_PHASE.editMeta: - return isRecord(value.edit); + return isRecord(value.edit) case FILE_PREVIEW_PHASE.content: return ( - typeof value.content === "string" && - (value.contentMode === "delta" || value.contentMode === "snapshot") && - typeof value.previewVersion === "number" && + typeof value.content === 'string' && + (value.contentMode === 'delta' || value.contentMode === 'snapshot') && + typeof value.previewVersion === 'number' && Number.isFinite(value.previewVersion) && - typeof value.fileName === "string" && + typeof value.fileName === 'string' && isOptionalString(value.fileId) && isOptionalString(value.targetKind) && isOptionalString(value.operation) && (value.edit === undefined || isRecord(value.edit)) - ); + ) case FILE_PREVIEW_PHASE.complete: - return ( - isOptionalString(value.fileId) && - isOptionalFiniteNumber(value.previewVersion) - ); + return isOptionalString(value.fileId) && isOptionalFiniteNumber(value.previewVersion) default: - return false; + return false } } export function isSyntheticFilePreviewEventEnvelope( - value: unknown, + value: unknown ): value is SyntheticFilePreviewEventEnvelope { - return ( - isSyntheticEnvelopeBase(value) && - isSyntheticFilePreviewPayload(value.payload) - ); + return isSyntheticEnvelopeBase(value) && isSyntheticFilePreviewPayload(value.payload) } // --------------------------------------------------------------------------- // Stream event type guards // --------------------------------------------------------------------------- -export function isToolCallStreamEvent( - event: SessionStreamEvent, -): event is ToolCallStreamEvent { - return ( - event.type === "tool" && - isRecord(event.payload) && - event.payload.phase === "call" - ); +export function 
isToolCallStreamEvent(event: SessionStreamEvent): event is ToolCallStreamEvent { + return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'call' } export function isToolArgsDeltaStreamEvent( - event: SessionStreamEvent, + event: SessionStreamEvent ): event is ToolArgsDeltaStreamEvent { - return ( - event.type === "tool" && - isRecord(event.payload) && - event.payload.phase === "args_delta" - ); + return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'args_delta' } -export function isToolResultStreamEvent( - event: SessionStreamEvent, -): event is ToolResultStreamEvent { - return ( - event.type === "tool" && - isRecord(event.payload) && - event.payload.phase === "result" - ); +export function isToolResultStreamEvent(event: SessionStreamEvent): event is ToolResultStreamEvent { + return event.type === 'tool' && isRecord(event.payload) && event.payload.phase === 'result' } export function isSubagentSpanStreamEvent( - event: SessionStreamEvent, + event: SessionStreamEvent ): event is SubagentSpanStreamEvent { - return ( - event.type === "span" && - isRecord(event.payload) && - event.payload.kind === "subagent" - ); + return event.type === 'span' && isRecord(event.payload) && event.payload.kind === 'subagent' } // --------------------------------------------------------------------------- @@ -469,56 +424,51 @@ export function isSubagentSpanStreamEvent( // --------------------------------------------------------------------------- export function isContractStreamEventEnvelope( - value: unknown, + value: unknown ): value is MothershipStreamV1EventEnvelope { - return isContractEnvelope(value); + return isContractEnvelope(value) } -export function parsePersistedStreamEventEnvelope( - value: unknown, -): ParseStreamEventEnvelopeResult { +export function parsePersistedStreamEventEnvelope(value: unknown): ParseStreamEventEnvelopeResult { if (isContractEnvelope(value)) { - return { ok: true, event: value }; + return { ok: 
true, event: value } } if (isSyntheticFilePreviewEventEnvelope(value)) { - return { ok: true, event: value }; + return { ok: true, event: value } } - const hints: string[] = []; + const hints: string[] = [] if (!isRecord(value)) { - hints.push("value is not an object"); + hints.push('value is not an object') } else { - if (value.v !== 1) hints.push(`unexpected v=${JSON.stringify(value.v)}`); - if (typeof value.type !== "string") hints.push("missing type"); - else if (!KNOWN_EVENT_TYPES.has(value.type)) - hints.push(`unknown type="${value.type}"`); - if (!isRecord(value.payload)) hints.push("missing or invalid payload"); + if (value.v !== 1) hints.push(`unexpected v=${JSON.stringify(value.v)}`) + if (typeof value.type !== 'string') hints.push('missing type') + else if (!KNOWN_EVENT_TYPES.has(value.type)) hints.push(`unknown type="${value.type}"`) + if (!isRecord(value.payload)) hints.push('missing or invalid payload') } return { ok: false, - reason: "invalid_stream_event", - message: "A stream event failed validation.", + reason: 'invalid_stream_event', + message: 'A stream event failed validation.', ...(hints.length > 0 ? { errors: hints } : {}), - }; + } } -export function parsePersistedStreamEventEnvelopeJson( - raw: string, -): ParseStreamEventEnvelopeResult { - let parsed: unknown; +export function parsePersistedStreamEventEnvelopeJson(raw: string): ParseStreamEventEnvelopeResult { + let parsed: unknown try { - parsed = JSON.parse(raw); + parsed = JSON.parse(raw) } catch (error) { - const rawMessage = error instanceof Error ? error.message : "Invalid JSON"; + const rawMessage = error instanceof Error ? error.message : 'Invalid JSON' return { ok: false, - reason: "invalid_json", - message: "Received invalid JSON while parsing a stream event.", + reason: 'invalid_json', + message: 'Received invalid JSON while parsing a stream event.', ...(rawMessage ? 
{ errors: [rawMessage] } : {}), - }; + } } - return parsePersistedStreamEventEnvelope(parsed); + return parsePersistedStreamEventEnvelope(parsed) } diff --git a/apps/sim/lib/copilot/request/session/explicit-abort.ts b/apps/sim/lib/copilot/request/session/explicit-abort.ts index 6b678647215..df080c4a3c8 100644 --- a/apps/sim/lib/copilot/request/session/explicit-abort.ts +++ b/apps/sim/lib/copilot/request/session/explicit-abort.ts @@ -1,16 +1,16 @@ -import type { Context } from "@opentelemetry/api"; -import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; -import { fetchGo } from "@/lib/copilot/request/go/fetch"; -import { env } from "@/lib/core/config/env"; +import type { Context } from '@opentelemetry/api' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { fetchGo } from '@/lib/copilot/request/go/fetch' +import { env } from '@/lib/core/config/env' -export const DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS = 3000; +export const DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS = 3000 export async function requestExplicitStreamAbort(params: { - streamId: string; - userId: string; - chatId?: string; - timeoutMs?: number; - otelContext?: Context; + streamId: string + userId: string + chatId?: string + timeoutMs?: number + otelContext?: Context }): Promise { const { streamId, @@ -18,49 +18,41 @@ export async function requestExplicitStreamAbort(params: { chatId, timeoutMs = DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS, otelContext, - } = params; + } = params const headers: Record = { - "Content-Type": "application/json", - }; + 'Content-Type': 'application/json', + } if (env.COPILOT_API_KEY) { - headers["x-api-key"] = env.COPILOT_API_KEY; + headers['x-api-key'] = env.COPILOT_API_KEY } - const controller = new AbortController(); - const timeout = setTimeout( - () => controller.abort("timeout:go_explicit_abort_fetch"), - timeoutMs, - ); + const controller = new AbortController() + const timeout = setTimeout(() => controller.abort('timeout:go_explicit_abort_fetch'), timeoutMs) try { - 
const response = await fetchGo( - `${SIM_AGENT_API_URL}/api/streams/explicit-abort`, - { - method: "POST", - headers, - signal: controller.signal, - body: JSON.stringify({ - messageId: streamId, - userId, - ...(chatId ? { chatId } : {}), - }), - otelContext, - spanName: "sim → go /api/streams/explicit-abort", - operation: "explicit_abort", - attributes: { - "copilot.stream.id": streamId, - ...(chatId ? { "chat.id": chatId } : {}), - }, + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { + method: 'POST', + headers, + signal: controller.signal, + body: JSON.stringify({ + messageId: streamId, + userId, + ...(chatId ? { chatId } : {}), + }), + otelContext, + spanName: 'sim → go /api/streams/explicit-abort', + operation: 'explicit_abort', + attributes: { + 'copilot.stream.id': streamId, + ...(chatId ? { 'chat.id': chatId } : {}), }, - ); + }) if (!response.ok) { - throw new Error( - `Explicit abort marker request failed: ${response.status}`, - ); + throw new Error(`Explicit abort marker request failed: ${response.status}`) } } finally { - clearTimeout(timeout); + clearTimeout(timeout) } } diff --git a/apps/sim/lib/copilot/request/session/recovery.test.ts b/apps/sim/lib/copilot/request/session/recovery.test.ts index 24599a01ffe..a06b42548b0 100644 --- a/apps/sim/lib/copilot/request/session/recovery.test.ts +++ b/apps/sim/lib/copilot/request/session/recovery.test.ts @@ -2,37 +2,37 @@ * @vitest-environment node */ -import { describe, expect, it, vi } from "vitest"; +import { describe, expect, it, vi } from 'vitest' const { getLatestSeq, getOldestSeq, readEvents } = vi.hoisted(() => ({ getLatestSeq: vi.fn(), getOldestSeq: vi.fn(), readEvents: vi.fn(), -})); +})) -vi.mock("./buffer", () => ({ +vi.mock('./buffer', () => ({ getLatestSeq, getOldestSeq, readEvents, -})); +})) -import { checkForReplayGap } from "./recovery"; +import { checkForReplayGap } from './recovery' -describe("checkForReplayGap", () => { - it("uses the latest buffered 
request id when run metadata is missing it", async () => { - getOldestSeq.mockResolvedValue(10); - getLatestSeq.mockResolvedValue(12); +describe('checkForReplayGap', () => { + it('uses the latest buffered request id when run metadata is missing it', async () => { + getOldestSeq.mockResolvedValue(10) + getLatestSeq.mockResolvedValue(12) readEvents.mockResolvedValue([ { - trace: { requestId: "req-live-123" }, + trace: { requestId: 'req-live-123' }, }, - ]); + ]) - const result = await checkForReplayGap("stream-1", "1"); + const result = await checkForReplayGap('stream-1', '1') - expect(readEvents).toHaveBeenCalledWith("stream-1", "11"); - expect(result?.gapDetected).toBe(true); - expect(result?.envelopes[0].trace.requestId).toBe("req-live-123"); - expect(result?.envelopes[1].trace.requestId).toBe("req-live-123"); - }); -}); + expect(readEvents).toHaveBeenCalledWith('stream-1', '11') + expect(result?.gapDetected).toBe(true) + expect(result?.envelopes[0].trace.requestId).toBe('req-live-123') + expect(result?.envelopes[1].trace.requestId).toBe('req-live-123') + }) +}) diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 7271ff27258..56d2ab15481 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -1,46 +1,46 @@ -import { createLogger } from "@sim/logger"; +import { createLogger } from '@sim/logger' import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; -import { withCopilotSpan } from "@/lib/copilot/request/otel"; -import { getLatestSeq, getOldestSeq, readEvents } from "./buffer"; -import { createEvent } from "./event"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from 
'@/lib/copilot/request/otel' +import { getLatestSeq, getOldestSeq, readEvents } from './buffer' +import { createEvent } from './event' -const logger = createLogger("SessionRecovery"); +const logger = createLogger('SessionRecovery') export interface ReplayGapResult { - gapDetected: true; - envelopes: ReturnType[]; + gapDetected: true + envelopes: ReturnType[] } export async function checkForReplayGap( streamId: string, afterCursor: string, - requestId?: string, + requestId?: string ): Promise { - const requestedAfterSeq = Number(afterCursor || "0"); + const requestedAfterSeq = Number(afterCursor || '0') if (requestedAfterSeq <= 0) { // Fast path: no cursor → nothing to check. Skip the span to avoid // emitting zero-work spans on every stream connect. - return null; + return null } return withCopilotSpan( TraceSpan.CopilotRecoveryCheckReplayGap, { - "stream.id": streamId, - "copilot.recovery.requested_after_seq": requestedAfterSeq, - ...(requestId ? { "request.id": requestId } : {}), + 'stream.id': streamId, + 'copilot.recovery.requested_after_seq': requestedAfterSeq, + ...(requestId ? { 'request.id': requestId } : {}), }, async (span) => { - const oldestSeq = await getOldestSeq(streamId); - const latestSeq = await getLatestSeq(streamId); + const oldestSeq = await getOldestSeq(streamId) + const latestSeq = await getLatestSeq(streamId) span.setAttributes({ - "copilot.recovery.oldest_seq": oldestSeq ?? -1, - "copilot.recovery.latest_seq": latestSeq ?? -1, - }); + 'copilot.recovery.oldest_seq': oldestSeq ?? -1, + 'copilot.recovery.latest_seq': latestSeq ?? 
-1, + }) if ( latestSeq !== null && @@ -48,21 +48,14 @@ export async function checkForReplayGap( oldestSeq !== null && requestedAfterSeq < oldestSeq - 1 ) { - const resolvedRequestId = await resolveReplayGapRequestId( + const resolvedRequestId = await resolveReplayGapRequestId(streamId, latestSeq, requestId) + logger.warn('Replay gap detected: requested cursor is below oldest available event', { streamId, + requestedAfterSeq, + oldestAvailableSeq: oldestSeq, latestSeq, - requestId, - ); - logger.warn( - "Replay gap detected: requested cursor is below oldest available event", - { - streamId, - requestedAfterSeq, - oldestAvailableSeq: oldestSeq, - latestSeq, - }, - ); - span.setAttribute("copilot.recovery.outcome", "gap_detected"); + }) + span.setAttribute('copilot.recovery.outcome', 'gap_detected') const gapEnvelope = createEvent({ streamId, @@ -71,15 +64,14 @@ export async function checkForReplayGap( requestId: resolvedRequestId, type: MothershipStreamV1EventType.error, payload: { - message: - "Replay history is no longer available. Some events may have been lost.", - code: "replay_gap", + message: 'Replay history is no longer available. 
Some events may have been lost.', + code: 'replay_gap', data: { oldestAvailableSeq: oldestSeq, requestedAfterSeq, }, }, - }); + }) const terminalEnvelope = createEvent({ streamId, @@ -89,44 +81,41 @@ export async function checkForReplayGap( type: MothershipStreamV1EventType.complete, payload: { status: MothershipStreamV1CompletionStatus.error, - reason: "replay_gap", + reason: 'replay_gap', }, - }); + }) return { gapDetected: true, envelopes: [gapEnvelope, terminalEnvelope], - }; + } } - span.setAttribute("copilot.recovery.outcome", "in_range"); - return null; - }, - ); + span.setAttribute('copilot.recovery.outcome', 'in_range') + return null + } + ) } async function resolveReplayGapRequestId( streamId: string, latestSeq: number, - requestId?: string, + requestId?: string ): Promise { - if (typeof requestId === "string" && requestId.length > 0) { - return requestId; + if (typeof requestId === 'string' && requestId.length > 0) { + return requestId } try { - const latestEvents = await readEvents( - streamId, - String(Math.max(latestSeq - 1, 0)), - ); - const latestRequestId = latestEvents[0]?.trace?.requestId; - return typeof latestRequestId === "string" ? latestRequestId : ""; + const latestEvents = await readEvents(streamId, String(Math.max(latestSeq - 1, 0))) + const latestRequestId = latestEvents[0]?.trace?.requestId + return typeof latestRequestId === 'string' ? latestRequestId : '' } catch (error) { - logger.warn("Failed to resolve request ID for replay gap", { + logger.warn('Failed to resolve request ID for replay gap', { streamId, latestSeq, error: error instanceof Error ? 
error.message : String(error), - }); - return ""; + }) + return '' } } diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index 6b569790f93..933e922ad80 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -1,177 +1,149 @@ -import { createLogger } from "@sim/logger"; -import { generateWorkspaceContext } from "@/lib/copilot/chat/workspace-context"; -import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from "@/lib/copilot/constants"; +import { createLogger } from '@sim/logger' +import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' +import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { MothershipStreamV1EventType, MothershipStreamV1SpanPayloadKind, -} from "@/lib/copilot/generated/mothership-stream-v1"; -import { createStreamingContext } from "@/lib/copilot/request/context/request-context"; -import { buildToolCallSummaries } from "@/lib/copilot/request/context/result"; -import { runStreamLoop } from "@/lib/copilot/request/go/stream"; -import { TraceSpan } from "@/lib/copilot/generated/trace-spans-v1"; -import { withCopilotSpan } from "@/lib/copilot/request/otel"; +} from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { createStreamingContext } from '@/lib/copilot/request/context/request-context' +import { buildToolCallSummaries } from '@/lib/copilot/request/context/result' +import { runStreamLoop } from '@/lib/copilot/request/go/stream' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, OrchestratorOptions, StreamEvent, StreamingContext, ToolCallSummary, -} from "@/lib/copilot/request/types"; -import { prepareExecutionContext } from "@/lib/copilot/tools/handlers/context"; -import { env } from "@/lib/core/config/env"; -import { isHosted } from "@/lib/core/config/feature-flags"; -import { generateId } 
from "@/lib/core/utils/uuid"; -import { getEffectiveDecryptedEnv } from "@/lib/environment/utils"; -import { getWorkflowById } from "@/lib/workflows/utils"; +} from '@/lib/copilot/request/types' +import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context' +import { env } from '@/lib/core/config/env' +import { isHosted } from '@/lib/core/config/feature-flags' +import { generateId } from '@/lib/core/utils/uuid' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { getWorkflowById } from '@/lib/workflows/utils' -const logger = createLogger("CopilotSubagentOrchestrator"); +const logger = createLogger('CopilotSubagentOrchestrator') -export interface SubagentOrchestratorOptions extends Omit< - OrchestratorOptions, - "onComplete" -> { - userId: string; - workflowId?: string; - workspaceId?: string; - userPermission?: string; - simRequestId?: string; - onComplete?: (result: SubagentOrchestratorResult) => void | Promise; +export interface SubagentOrchestratorOptions extends Omit { + userId: string + workflowId?: string + workspaceId?: string + userPermission?: string + simRequestId?: string + onComplete?: (result: SubagentOrchestratorResult) => void | Promise } export interface SubagentOrchestratorResult { - success: boolean; - content: string; - toolCalls: ToolCallSummary[]; + success: boolean + content: string + toolCalls: ToolCallSummary[] structuredResult?: { - type?: string; - summary?: string; - data?: unknown; - success?: boolean; - }; - error?: string; - errors?: string[]; + type?: string + summary?: string + data?: unknown + success?: boolean + } + error?: string + errors?: string[] } export async function orchestrateSubagentStream( agentId: string, requestPayload: Record, - options: SubagentOrchestratorOptions, + options: SubagentOrchestratorOptions ): Promise { return withCopilotSpan( TraceSpan.CopilotSubagentExecute, { - "subagent.id": agentId, - "user.id": options.userId, - ...(options.simRequestId ? 
{ "sim.request_id": options.simRequestId } : {}), - ...(options.workflowId ? { "workflow.id": options.workflowId } : {}), - ...(options.workspaceId ? { "workspace.id": options.workspaceId } : {}), + 'subagent.id': agentId, + 'user.id': options.userId, + ...(options.simRequestId ? { 'sim.request_id': options.simRequestId } : {}), + ...(options.workflowId ? { 'workflow.id': options.workflowId } : {}), + ...(options.workspaceId ? { 'workspace.id': options.workspaceId } : {}), }, async (otelSpan) => { - const result = await orchestrateSubagentStreamInner( - agentId, - requestPayload, - options, - ); + const result = await orchestrateSubagentStreamInner(agentId, requestPayload, options) otelSpan.setAttributes({ - "subagent.outcome.success": result.success, - "subagent.outcome.tool_call_count": result.toolCalls.length, - "subagent.outcome.content_bytes": result.content?.length ?? 0, + 'subagent.outcome.success': result.success, + 'subagent.outcome.tool_call_count': result.toolCalls.length, + 'subagent.outcome.content_bytes': result.content?.length ?? 0, ...(result.structuredResult?.type - ? { "subagent.outcome.structured_type": result.structuredResult.type } - : {}), - ...(result.error - ? { "subagent.outcome.error": String(result.error).slice(0, 500) } + ? { 'subagent.outcome.structured_type': result.structuredResult.type } : {}), - }); - return result; - }, - ); + ...(result.error ? 
{ 'subagent.outcome.error': String(result.error).slice(0, 500) } : {}), + }) + return result + } + ) } async function orchestrateSubagentStreamInner( agentId: string, requestPayload: Record, - options: SubagentOrchestratorOptions, + options: SubagentOrchestratorOptions ): Promise { - const { userId, workflowId, workspaceId, userPermission } = options; + const { userId, workflowId, workspaceId, userPermission } = options const chatId = - (typeof requestPayload.chatId === "string" && requestPayload.chatId) || - generateId(); - const execContext = await buildExecutionContext( - userId, - workflowId, - workspaceId, - chatId, - ); + (typeof requestPayload.chatId === 'string' && requestPayload.chatId) || generateId() + const execContext = await buildExecutionContext(userId, workflowId, workspaceId, chatId) let resolvedWorkflowName = - typeof requestPayload.workflowName === "string" - ? requestPayload.workflowName - : undefined; + typeof requestPayload.workflowName === 'string' ? requestPayload.workflowName : undefined let resolvedWorkspaceId = execContext.workspaceId || - (typeof requestPayload.workspaceId === "string" - ? requestPayload.workspaceId - : workspaceId); + (typeof requestPayload.workspaceId === 'string' ? requestPayload.workspaceId : workspaceId) if (workflowId && (!resolvedWorkflowName || !resolvedWorkspaceId)) { - const workflow = await getWorkflowById(workflowId); - resolvedWorkflowName ||= workflow?.name || undefined; - resolvedWorkspaceId ||= workflow?.workspaceId || undefined; + const workflow = await getWorkflowById(workflowId) + resolvedWorkflowName ||= workflow?.name || undefined + resolvedWorkspaceId ||= workflow?.workspaceId || undefined } let resolvedWorkspaceContext = - typeof requestPayload.workspaceContext === "string" + typeof requestPayload.workspaceContext === 'string' ? 
requestPayload.workspaceContext - : undefined; + : undefined if (!resolvedWorkspaceContext && resolvedWorkspaceId) { try { - resolvedWorkspaceContext = await generateWorkspaceContext( - resolvedWorkspaceId, - userId, - ); + resolvedWorkspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId) } catch (error) { - logger.warn("Failed to generate workspace context for subagent request", { + logger.warn('Failed to generate workspace context for subagent request', { agentId, workspaceId: resolvedWorkspaceId, error: error instanceof Error ? error.message : String(error), - }); + }) } } - const msgId = requestPayload?.messageId; + const msgId = requestPayload?.messageId const context = createStreamingContext({ chatId, requestId: options.simRequestId, - messageId: typeof msgId === "string" ? msgId : generateId(), - }); + messageId: typeof msgId === 'string' ? msgId : generateId(), + }) - let structuredResult: SubagentOrchestratorResult["structuredResult"]; + let structuredResult: SubagentOrchestratorResult['structuredResult'] try { await runStreamLoop( `${SIM_AGENT_API_URL}/api/subagent/${agentId}`, { - method: "POST", + method: 'POST', headers: { - "Content-Type": "application/json", - ...(env.COPILOT_API_KEY ? { "x-api-key": env.COPILOT_API_KEY } : {}), - "X-Client-Version": SIM_AGENT_VERSION, - ...(options.simRequestId - ? { "X-Sim-Request-ID": options.simRequestId } - : {}), + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + 'X-Client-Version': SIM_AGENT_VERSION, + ...(options.simRequestId ? { 'X-Sim-Request-ID': options.simRequestId } : {}), }, body: JSON.stringify({ ...requestPayload, chatId, userId, stream: true, - ...(resolvedWorkflowName - ? { workflowName: resolvedWorkflowName } - : {}), + ...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}), ...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}), - ...(resolvedWorkspaceContext - ? 
{ workspaceContext: resolvedWorkspaceContext } - : {}), + ...(resolvedWorkspaceContext ? { workspaceContext: resolvedWorkspaceContext } : {}), isHosted, ...(userPermission ? { userPermission } : {}), }), @@ -184,27 +156,22 @@ async function orchestrateSubagentStreamInner( onBeforeDispatch: (event: StreamEvent, ctx: StreamingContext) => { if ( event.type === MothershipStreamV1EventType.span && - (event.payload.kind === - MothershipStreamV1SpanPayloadKind.structured_result || - event.payload.kind === - MothershipStreamV1SpanPayloadKind.subagent_result) + (event.payload.kind === MothershipStreamV1SpanPayloadKind.structured_result || + event.payload.kind === MothershipStreamV1SpanPayloadKind.subagent_result) ) { - structuredResult = normalizeStructuredResult(event.payload.data); - ctx.streamComplete = true; - return true; + structuredResult = normalizeStructuredResult(event.payload.data) + ctx.streamComplete = true + return true } - if ( - event.scope?.agentId === agentId && - !ctx.subAgentParentToolCallId - ) { - return false; + if (event.scope?.agentId === agentId && !ctx.subAgentParentToolCallId) { + return false } - return false; + return false }, - }, - ); + } + ) const result: SubagentOrchestratorResult = { success: context.errors.length === 0 && !context.wasAborted, @@ -212,56 +179,51 @@ async function orchestrateSubagentStreamInner( toolCalls: buildToolCallSummaries(context), structuredResult, errors: context.errors.length ? context.errors : undefined, - }; - await options.onComplete?.(result); - return result; + } + await options.onComplete?.(result) + return result } catch (error) { - const err = - error instanceof Error - ? error - : new Error("Subagent orchestration failed"); - logger.error("Subagent orchestration failed", { + const err = error instanceof Error ? 
error : new Error('Subagent orchestration failed') + logger.error('Subagent orchestration failed', { error: err.message, agentId, - }); - await options.onError?.(err); + }) + await options.onError?.(err) return { success: false, content: context.accumulatedContent, toolCalls: [], error: err.message, - }; + } } } -function normalizeStructuredResult( - data: unknown, -): SubagentOrchestratorResult["structuredResult"] { - if (!data || typeof data !== "object") return undefined; - const d = data as Record; +function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] { + if (!data || typeof data !== 'object') return undefined + const d = data as Record return { type: (d.result_type || d.type) as string | undefined, summary: d.summary as string | undefined, data: d.data ?? d, success: d.success as boolean | undefined, - }; + } } async function buildExecutionContext( userId: string, workflowId?: string, workspaceId?: string, - chatId?: string, + chatId?: string ): Promise { if (workflowId) { - return prepareExecutionContext(userId, workflowId, chatId, { workspaceId }); + return prepareExecutionContext(userId, workflowId, chatId, { workspaceId }) } - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId); + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) return { userId, - workflowId: workflowId || "", + workflowId: workflowId || '', workspaceId, chatId, decryptedEnvVars, - }; + } } diff --git a/apps/sim/lib/copilot/request/tools/executor.ts b/apps/sim/lib/copilot/request/tools/executor.ts index 2016d02fa9d..55b358221df 100644 --- a/apps/sim/lib/copilot/request/tools/executor.ts +++ b/apps/sim/lib/copilot/request/tools/executor.ts @@ -18,6 +18,7 @@ import { } from '@/lib/copilot/generated/mothership-stream-v1' import { CreateWorkflow } from '@/lib/copilot/generated/tool-catalog-v1' import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm' +import { 
withCopilotToolSpan } from '@/lib/copilot/request/otel' import { markToolResultSeen } from '@/lib/copilot/request/sse-utils' import { getToolCallStateOutput, @@ -39,7 +40,6 @@ import { type StreamingContext, type ToolCallState, } from '@/lib/copilot/request/types' -import { withCopilotToolSpan } from '@/lib/copilot/request/otel' import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor' export { waitForToolCompletion } from '@/lib/copilot/request/tools/client' @@ -107,18 +107,20 @@ function summarizeToolResultForSpan(result: { } function extractAttachmentShape( - output: unknown, + output: unknown ): { imageCount: number; imageBytes: number; mediaType?: string } | null { if (!isRecord(output)) return null const candidate = (output as Record).attachment if (!isRecord(candidate)) return null const source = (candidate as Record).source if (!isRecord(source)) return null - const type = typeof (candidate as Record).type === 'string' - ? ((candidate as Record).type as string) - : '' + const type = + typeof (candidate as Record).type === 'string' + ? ((candidate as Record).type as string) + : '' if (type !== 'image') return null - const mediaType = typeof source.media_type === 'string' ? (source.media_type as string) : undefined + const mediaType = + typeof source.media_type === 'string' ? (source.media_type as string) : undefined const data = typeof source.data === 'string' ? (source.data as string) : '' return { imageCount: 1, @@ -237,13 +239,15 @@ export async function executeToolAndReport( message: 'Tool call not found', }) - const argsPayload = toolCall.params ? (() => { - try { - return JSON.stringify(toolCall.params) - } catch { - return undefined - } - })() : undefined + const argsPayload = toolCall.params + ? 
(() => { + try { + return JSON.stringify(toolCall.params) + } catch { + return undefined + } + })() + : undefined return withCopilotToolSpan( { toolName: toolCall.name, @@ -260,7 +264,7 @@ export async function executeToolAndReport( otelSpan.setAttribute('tool.outcome.message', String(completion.message).slice(0, 500)) } return completion - }, + } ) } @@ -268,9 +272,8 @@ async function executeToolAndReportInner( toolCall: ToolCallState, context: StreamingContext, execContext: ExecutionContext, - options?: OrchestratorOptions, + options?: OrchestratorOptions ): Promise { - if (toolCall.status === 'executing') { return buildCompletionSignal({ status: MothershipStreamV1AsyncToolRecordStatus.running, diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 2c94cebde94..4bfb35d00b3 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -222,6 +222,6 @@ export async function maybeWriteOutputToFile( error: `Failed to write output file: ${message}`, } } - }, + } ) } diff --git a/apps/sim/lib/copilot/request/tools/resources.ts b/apps/sim/lib/copilot/request/tools/resources.ts index 22f02ba959e..84948df08ed 100644 --- a/apps/sim/lib/copilot/request/tools/resources.ts +++ b/apps/sim/lib/copilot/request/tools/resources.ts @@ -34,7 +34,11 @@ export async function handleResourceSideEffects( // Cheap early exit so we don't emit a span for tools that can never // produce resources (most of them). The span only shows up for tools // that might actually do resource work. 
- if (!hasDeleteCapability(toolName) && !isResourceToolName(toolName) && !(result.resources && result.resources.length > 0)) { + if ( + !hasDeleteCapability(toolName) && + !isResourceToolName(toolName) && + !(result.resources && result.resources.length > 0) + ) { return } @@ -119,6 +123,6 @@ export async function handleResourceSideEffects( 'copilot.resources.upserted_count': upsertedCount, 'copilot.resources.aborted': isAborted(), }) - }, + } ) } diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index 7489f07934c..a7df1e9ed99 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -138,15 +138,14 @@ export async function maybeWriteOutputToTable( }) span.setAttribute('copilot.table.outcome', 'failed') span.addEvent('copilot.table.error', { - 'error.message': - (err instanceof Error ? err.message : String(err)).slice(0, 500), + 'error.message': (err instanceof Error ? err.message : String(err)).slice(0, 500), }) return { success: false, error: `Failed to write to table: ${err instanceof Error ? err.message : String(err)}`, } } - }, + } ) } @@ -286,14 +285,13 @@ export async function maybeWriteReadCsvToTable( }) span.setAttribute('copilot.table.outcome', 'failed') span.addEvent('copilot.table.error', { - 'error.message': - (err instanceof Error ? err.message : String(err)).slice(0, 500), + 'error.message': (err instanceof Error ? err.message : String(err)).slice(0, 500), }) return { success: false, error: `Failed to import into table: ${err instanceof Error ? 
err.message : String(err)}`, } } - }, + } ) } diff --git a/apps/sim/lib/copilot/request/trace.ts b/apps/sim/lib/copilot/request/trace.ts index 5a4c8dfd498..0e4d686942f 100644 --- a/apps/sim/lib/copilot/request/trace.ts +++ b/apps/sim/lib/copilot/request/trace.ts @@ -1,6 +1,6 @@ -import type { Context } from "@opentelemetry/api"; -import { createLogger } from "@sim/logger"; -import { SIM_AGENT_API_URL } from "@/lib/copilot/constants"; +import type { Context } from '@opentelemetry/api' +import { createLogger } from '@sim/logger' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { type RequestTraceV1CostSummary, RequestTraceV1Outcome, @@ -9,24 +9,24 @@ import { RequestTraceV1SpanSource, RequestTraceV1SpanStatus, type RequestTraceV1UsageSummary, -} from "@/lib/copilot/generated/request-trace-v1"; -import { env } from "@/lib/core/config/env"; +} from '@/lib/copilot/generated/request-trace-v1' +import { env } from '@/lib/core/config/env' -const logger = createLogger("RequestTrace"); +const logger = createLogger('RequestTrace') export class TraceCollector { - private readonly spans: RequestTraceV1Span[] = []; - private readonly startMs = Date.now(); - private goTraceId?: string; - private activeSpan?: RequestTraceV1Span; + private readonly spans: RequestTraceV1Span[] = [] + private readonly startMs = Date.now() + private goTraceId?: string + private activeSpan?: RequestTraceV1Span startSpan( name: string, kind: string, attributes?: Record, - parent?: RequestTraceV1Span, + parent?: RequestTraceV1Span ): RequestTraceV1Span { - const startMs = Date.now(); + const startMs = Date.now() const span: RequestTraceV1Span = { name, kind, @@ -40,57 +40,55 @@ export class TraceCollector { : this.activeSpan ? { parentName: this.activeSpan.name } : {}), - ...(attributes && Object.keys(attributes).length > 0 - ? { attributes } - : {}), - }; - this.spans.push(span); - return span; + ...(attributes && Object.keys(attributes).length > 0 ? 
{ attributes } : {}), + } + this.spans.push(span) + return span } endSpan( span: RequestTraceV1Span, - status: RequestTraceV1SpanStatus | string = RequestTraceV1SpanStatus.ok, + status: RequestTraceV1SpanStatus | string = RequestTraceV1SpanStatus.ok ): void { - span.endMs = Date.now(); - span.durationMs = span.endMs - span.startMs; - span.status = status as RequestTraceV1SpanStatus; + span.endMs = Date.now() + span.durationMs = span.endMs - span.startMs + span.status = status as RequestTraceV1SpanStatus } setActiveSpan(span: RequestTraceV1Span | undefined): void { - this.activeSpan = span; + this.activeSpan = span } setGoTraceId(id: string): void { if (!this.goTraceId && id) { - this.goTraceId = id; + this.goTraceId = id } } build(params: { - outcome: RequestTraceV1Outcome; - simRequestId: string; - streamId?: string; - chatId?: string; - runId?: string; - executionId?: string; - usage?: { prompt: number; completion: number }; - cost?: { input: number; output: number; total: number }; + outcome: RequestTraceV1Outcome + simRequestId: string + streamId?: string + chatId?: string + runId?: string + executionId?: string + usage?: { prompt: number; completion: number } + cost?: { input: number; output: number; total: number } }): RequestTraceV1SimReport { - const endMs = Date.now(); + const endMs = Date.now() const usage: RequestTraceV1UsageSummary | undefined = params.usage ? { inputTokens: params.usage.prompt, outputTokens: params.usage.completion, } - : undefined; + : undefined const cost: RequestTraceV1CostSummary | undefined = params.cost ? 
{ rawTotalCost: params.cost.total, billedTotalCost: params.cost.total, } - : undefined; + : undefined return { simRequestId: params.simRequestId, @@ -106,39 +104,39 @@ export class TraceCollector { usage, cost, spans: this.spans, - }; + } } } export async function reportTrace( trace: RequestTraceV1SimReport, - otelContext?: Context, + otelContext?: Context ): Promise { - const { fetchGo } = await import("@/lib/copilot/request/go/fetch"); - const body = JSON.stringify(trace); + const { fetchGo } = await import('@/lib/copilot/request/go/fetch') + const body = JSON.stringify(trace) const response = await fetchGo(`${SIM_AGENT_API_URL}/api/traces`, { - method: "POST", + method: 'POST', headers: { - "Content-Type": "application/json", - ...(env.COPILOT_API_KEY ? { "x-api-key": env.COPILOT_API_KEY } : {}), + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, body, otelContext, - spanName: "sim → go /api/traces", - operation: "report_trace", + spanName: 'sim → go /api/traces', + operation: 'report_trace', attributes: { - "copilot.request.id": trace.simRequestId ?? "", - "http.request.content_length": body.length, - "copilot.trace.span_count": trace.spans?.length ?? 0, + 'copilot.request.id': trace.simRequestId ?? '', + 'http.request.content_length': body.length, + 'copilot.trace.span_count': trace.spans?.length ?? 
0, }, - }); + }) if (!response.ok) { - logger.warn("Failed to report trace", { + logger.warn('Failed to report trace', { status: response.status, simRequestId: trace.simRequestId, - }); + }) } } -export { RequestTraceV1Outcome, RequestTraceV1SpanStatus }; +export { RequestTraceV1Outcome, RequestTraceV1SpanStatus } diff --git a/apps/sim/lib/copilot/request/types.ts b/apps/sim/lib/copilot/request/types.ts index 2964c9f354c..0cf48491929 100644 --- a/apps/sim/lib/copilot/request/types.ts +++ b/apps/sim/lib/copilot/request/types.ts @@ -1,7 +1,7 @@ import type { AsyncCompletionSignal } from '@/lib/copilot/async-runs/lifecycle' import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1' -import type { StreamEvent } from '@/lib/copilot/request/session' import type { RequestTraceV1Span } from '@/lib/copilot/generated/request-trace-v1' +import type { StreamEvent } from '@/lib/copilot/request/session' import type { TraceCollector } from '@/lib/copilot/request/trace' import type { ToolExecutionContext, ToolExecutionResult } from '@/lib/copilot/tool-executor/types' diff --git a/apps/sim/lib/copilot/vfs/file-reader.test.ts b/apps/sim/lib/copilot/vfs/file-reader.test.ts index 0efe948bf57..1e202d77d5f 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.test.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.test.ts @@ -23,7 +23,9 @@ async function makeNoisePng(width: number, height: number): Promise { const sharp = (await import('sharp')).default const raw = Buffer.alloc(width * height * 3) randomFillSync(raw) - return sharp(raw, { raw: { width, height, channels: 3 } }).png().toBuffer() + return sharp(raw, { raw: { width, height, channels: 3 } }) + .png() + .toBuffer() } describe('readFileRecord', () => { diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index 7a4deee6cf6..eea76a44e71 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -1,4 +1,4 @@ -import { 
SpanStatusCode, trace, type Span } from '@opentelemetry/api' +import { type Span, SpanStatusCode, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' @@ -63,9 +63,9 @@ function detectImageMime(buf: Buffer, claimed: string): string { } interface PreparedVisionImage { - buffer: Buffer - mediaType: string - resized: boolean + buffer: Buffer + mediaType: string + resized: boolean } /** @@ -79,178 +79,174 @@ interface PreparedVisionImage { * dimension/quality chosen. */ async function prepareImageForVision( - buffer: Buffer, - claimedType: string + buffer: Buffer, + claimedType: string ): Promise { - return getVfsTracer().startActiveSpan( - TraceSpan.CopilotVfsPrepareImage, - { - attributes: { - 'copilot.vfs.input.bytes': buffer.length, - 'copilot.vfs.input.media_type_claimed': claimedType, - }, - }, - async (span) => { - try { - const mediaType = detectImageMime(buffer, claimedType) - span.setAttribute('copilot.vfs.input.media_type_detected', mediaType) + return getVfsTracer().startActiveSpan( + TraceSpan.CopilotVfsPrepareImage, + { + attributes: { + 'copilot.vfs.input.bytes': buffer.length, + 'copilot.vfs.input.media_type_claimed': claimedType, + }, + }, + async (span) => { + try { + const mediaType = detectImageMime(buffer, claimedType) + span.setAttribute('copilot.vfs.input.media_type_detected', mediaType) - let sharpModule: typeof import('sharp').default - try { - sharpModule = (await import('sharp')).default - } catch (err) { - logger.warn('Failed to load sharp for image preparation', { - mediaType, - error: err instanceof Error ? err.message : String(err), - }) - span.setAttribute('copilot.vfs.sharp.load_failed', true) - const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES - span.setAttribute( - 'copilot.vfs.outcome', - fitsWithoutSharp ? 
'passthrough_no_sharp' : 'rejected_no_sharp', - ) - return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null - } + let sharpModule: typeof import('sharp').default + try { + sharpModule = (await import('sharp')).default + } catch (err) { + logger.warn('Failed to load sharp for image preparation', { + mediaType, + error: err instanceof Error ? err.message : String(err), + }) + span.setAttribute('copilot.vfs.sharp.load_failed', true) + const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES + span.setAttribute( + 'copilot.vfs.outcome', + fitsWithoutSharp ? 'passthrough_no_sharp' : 'rejected_no_sharp' + ) + return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null + } - let metadata: Awaited['metadata']>> - try { - metadata = await sharpModule(buffer, { limitInputPixels: false }).metadata() - } catch (err) { - logger.warn('Failed to read image metadata for VFS read', { - mediaType, - error: err instanceof Error ? err.message : String(err), - }) - span.setAttribute('copilot.vfs.metadata.failed', true) - const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES - span.setAttribute( - 'copilot.vfs.outcome', - fitsWithoutSharp ? 'passthrough_no_metadata' : 'rejected_no_metadata', - ) - return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null - } + let metadata: Awaited['metadata']>> + try { + metadata = await sharpModule(buffer, { limitInputPixels: false }).metadata() + } catch (err) { + logger.warn('Failed to read image metadata for VFS read', { + mediaType, + error: err instanceof Error ? err.message : String(err), + }) + span.setAttribute('copilot.vfs.metadata.failed', true) + const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES + span.setAttribute( + 'copilot.vfs.outcome', + fitsWithoutSharp ? 'passthrough_no_metadata' : 'rejected_no_metadata' + ) + return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null + } - const width = metadata.width ?? 0 - const height = metadata.height ?? 
0 - span.setAttributes({ - 'copilot.vfs.input.width': width, - 'copilot.vfs.input.height': height, - }) + const width = metadata.width ?? 0 + const height = metadata.height ?? 0 + span.setAttributes({ + 'copilot.vfs.input.width': width, + 'copilot.vfs.input.height': height, + }) - const needsResize = - buffer.length > MAX_IMAGE_READ_BYTES || - width > MAX_IMAGE_DIMENSION || - height > MAX_IMAGE_DIMENSION - if (!needsResize) { - span.setAttributes({ - 'copilot.vfs.resized': false, - 'copilot.vfs.outcome': 'passthrough_fits_budget', - 'copilot.vfs.output.bytes': buffer.length, - 'copilot.vfs.output.media_type': mediaType, - }) - return { buffer, mediaType, resized: false } - } + const needsResize = + buffer.length > MAX_IMAGE_READ_BYTES || + width > MAX_IMAGE_DIMENSION || + height > MAX_IMAGE_DIMENSION + if (!needsResize) { + span.setAttributes({ + 'copilot.vfs.resized': false, + 'copilot.vfs.outcome': 'passthrough_fits_budget', + 'copilot.vfs.output.bytes': buffer.length, + 'copilot.vfs.output.media_type': mediaType, + }) + return { buffer, mediaType, resized: false } + } - const hasAlpha = Boolean( - metadata.hasAlpha || - mediaType === 'image/png' || - mediaType === 'image/webp' || - mediaType === 'image/gif' - ) - span.setAttribute('copilot.vfs.has_alpha', hasAlpha) + const hasAlpha = Boolean( + metadata.hasAlpha || + mediaType === 'image/png' || + mediaType === 'image/webp' || + mediaType === 'image/gif' + ) + span.setAttribute('copilot.vfs.has_alpha', hasAlpha) - let attempts = 0 - for (const dimension of IMAGE_RESIZE_DIMENSIONS) { - for (const quality of IMAGE_QUALITY_STEPS) { - attempts += 1 - try { - const pipeline = sharpModule(buffer, { limitInputPixels: false }) - .rotate() - .resize({ - width: dimension, - height: dimension, - fit: 'inside', - withoutEnlargement: true, - }) + let attempts = 0 + for (const dimension of IMAGE_RESIZE_DIMENSIONS) { + for (const quality of IMAGE_QUALITY_STEPS) { + attempts += 1 + try { + const pipeline = sharpModule(buffer, { 
limitInputPixels: false }).rotate().resize({ + width: dimension, + height: dimension, + fit: 'inside', + withoutEnlargement: true, + }) - const transformed = hasAlpha - ? { - buffer: await pipeline - .webp({ quality, alphaQuality: quality, effort: 4 }) - .toBuffer(), - mediaType: 'image/webp', - } - : { - buffer: await pipeline - .jpeg({ quality, mozjpeg: true, chromaSubsampling: '4:4:4' }) - .toBuffer(), - mediaType: 'image/jpeg', - } + const transformed = hasAlpha + ? { + buffer: await pipeline + .webp({ quality, alphaQuality: quality, effort: 4 }) + .toBuffer(), + mediaType: 'image/webp', + } + : { + buffer: await pipeline + .jpeg({ quality, mozjpeg: true, chromaSubsampling: '4:4:4' }) + .toBuffer(), + mediaType: 'image/jpeg', + } - span.addEvent('copilot.vfs.resize_attempt', { - 'copilot.vfs.resize.dimension': dimension, - 'copilot.vfs.resize.quality': quality, - 'copilot.vfs.resize.output_bytes': transformed.buffer.length, - 'copilot.vfs.resize.fits_budget': - transformed.buffer.length <= MAX_IMAGE_READ_BYTES, - }) + span.addEvent('copilot.vfs.resize_attempt', { + 'copilot.vfs.resize.dimension': dimension, + 'copilot.vfs.resize.quality': quality, + 'copilot.vfs.resize.output_bytes': transformed.buffer.length, + 'copilot.vfs.resize.fits_budget': transformed.buffer.length <= MAX_IMAGE_READ_BYTES, + }) - if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { - logger.info('Resized image for VFS read', { - originalBytes: buffer.length, - outputBytes: transformed.buffer.length, - originalWidth: width || undefined, - originalHeight: height || undefined, - maxDimension: dimension, - quality, - originalMediaType: mediaType, - outputMediaType: transformed.mediaType, - }) - span.setAttributes({ - 'copilot.vfs.resized': true, - 'copilot.vfs.resize.attempts': attempts, - 'copilot.vfs.resize.chosen_dimension': dimension, - 'copilot.vfs.resize.chosen_quality': quality, - 'copilot.vfs.output.bytes': transformed.buffer.length, - 'copilot.vfs.output.media_type': 
transformed.mediaType, - 'copilot.vfs.outcome': 'resized', - }) - return { - buffer: transformed.buffer, - mediaType: transformed.mediaType, - resized: true, - } - } - } catch (err) { - logger.warn('Failed image resize attempt for VFS read', { - mediaType, - dimension, - quality, - error: err instanceof Error ? err.message : String(err), - }) - span.addEvent('copilot.vfs.resize_attempt_failed', { - 'copilot.vfs.resize.dimension': dimension, - 'copilot.vfs.resize.quality': quality, - 'error.message': - err instanceof Error ? err.message : String(err).slice(0, 500), - }) - } - } - } + if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { + logger.info('Resized image for VFS read', { + originalBytes: buffer.length, + outputBytes: transformed.buffer.length, + originalWidth: width || undefined, + originalHeight: height || undefined, + maxDimension: dimension, + quality, + originalMediaType: mediaType, + outputMediaType: transformed.mediaType, + }) + span.setAttributes({ + 'copilot.vfs.resized': true, + 'copilot.vfs.resize.attempts': attempts, + 'copilot.vfs.resize.chosen_dimension': dimension, + 'copilot.vfs.resize.chosen_quality': quality, + 'copilot.vfs.output.bytes': transformed.buffer.length, + 'copilot.vfs.output.media_type': transformed.mediaType, + 'copilot.vfs.outcome': 'resized', + }) + return { + buffer: transformed.buffer, + mediaType: transformed.mediaType, + resized: true, + } + } + } catch (err) { + logger.warn('Failed image resize attempt for VFS read', { + mediaType, + dimension, + quality, + error: err instanceof Error ? err.message : String(err), + }) + span.addEvent('copilot.vfs.resize_attempt_failed', { + 'copilot.vfs.resize.dimension': dimension, + 'copilot.vfs.resize.quality': quality, + 'error.message': err instanceof Error ? 
err.message : String(err).slice(0, 500), + }) + } + } + } - span.setAttributes({ - 'copilot.vfs.resized': false, - 'copilot.vfs.resize.attempts': attempts, - 'copilot.vfs.outcome': 'rejected_too_large_after_resize', - }) - return null - } catch (err) { - recordSpanError(span, err) - throw err - } finally { - span.end() - } - }, - ) + span.setAttributes({ + 'copilot.vfs.resized': false, + 'copilot.vfs.resize.attempts': attempts, + 'copilot.vfs.outcome': 'rejected_too_large_after_resize', + }) + return null + } catch (err) { + recordSpanError(span, err) + throw err + } finally { + span.end() + } + } + ) } export interface FileReadResult { @@ -366,9 +362,7 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise Date: Thu, 16 Apr 2026 17:39:47 -0700 Subject: [PATCH 04/10] Otel --- apps/sim/instrumentation-node.ts | 133 ++++++++++++++++++++++++++++--- 1 file changed, 123 insertions(+), 10 deletions(-) diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index f3a847a1448..1695eb00d11 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -66,6 +66,86 @@ function isBusinessSpan(spanName: string): boolean { return ALLOWED_SPAN_PREFIXES.some((prefix) => spanName.startsWith(prefix)) } +/** + * Parse OTLP headers from the standard env var `OTEL_EXPORTER_OTLP_HEADERS`. + * + * Spec format: `key1=value1,key2=value2`, with values optionally + * URL-encoded. We tolerate whitespace around entries and values that + * themselves contain `=`. This is the mechanism every managed backend + * (Honeycomb, Grafana Cloud, New Relic, Datadog) uses to receive its + * auth token without any backend-specific code paths here. 
+ */ +function parseOtlpHeadersEnv(raw: string): Record { + const out: Record = {} + if (!raw) return out + for (const part of raw.split(',')) { + const trimmed = part.trim() + if (!trimmed) continue + const eq = trimmed.indexOf('=') + if (eq <= 0) continue + const key = trimmed.slice(0, eq).trim() + const rawVal = trimmed.slice(eq + 1).trim() + let val = rawVal + try { + val = decodeURIComponent(rawVal) + } catch { + // value wasn't URL-encoded; keep as-is. + } + if (key) out[key] = val + } + return out +} + +/** + * Normalize an OTLP base URL to the full traces-signal endpoint. + * + * The OTel HTTP exporter sends to whatever URL you give it verbatim + * — no signal-path appending. That's a footgun when the same env + * var also flows into the Go side, where the SDK *does* append + * `/v1/traces` automatically. We bridge the gap here so both halves + * of the mothership can share one endpoint value. + * + * Rules: + * - If the URL already has a non-root path, respect it (operator + * intent: "post to exactly this URL"). + * - Otherwise, append `/v1/traces`. + * - Malformed URLs pass through unchanged; the exporter will + * surface the error at first export. + */ +function normalizeOtlpTracesUrl(url: string): string { + if (!url) return url + try { + const u = new URL(url) + if (u.pathname && u.pathname !== '/') return url + return `${url.replace(/\/$/, '')}/v1/traces` + } catch { + return url + } +} + +/** + * Resolve the sampling ratio from env, with sensible fallbacks. + * + * Matches the Go side's `samplerFromEnv()` semantics so operators can + * control both halves of the mothership trace tree from the same + * variable. Invalid values degrade gracefully to the fallback. 
+ */ +function resolveSamplingRatio(isLocalEndpoint: boolean): number { + const raw = process.env.TELEMETRY_SAMPLING_RATIO || process.env.OTEL_TRACES_SAMPLER_ARG || '' + if (raw) { + const parsed = Number.parseFloat(raw) + if (Number.isFinite(parsed)) { + if (parsed <= 0) return 0 + if (parsed >= 1) return 1 + return parsed + } + } + // Local dev gets 100% for deterministic manual verification. + // Production default is also 100% — the 1-day retention at the + // backend caps storage cost, not sampling. + return isLocalEndpoint ? 1.0 : 1.0 +} + /** * MothershipOriginSpanProcessor tags every span this process creates with * `mothership.origin` and prepends a `sim: ` prefix to the span name on @@ -106,11 +186,16 @@ async function initializeOpenTelemetry() { telemetryConfig = DEFAULT_TELEMETRY_CONFIG } - // Prefer process.env directly: @t3-oss/env-nextjs sometimes returns - // undefined for server vars that aren't listed in experimental__runtimeEnv, - // and TELEMETRY_ENDPOINT isn't mapped there. + // Endpoint resolution: prefer the OTel spec env var, fall back to + // our legacy TELEMETRY_ENDPOINT so existing deploys keep working + // during rollout. Read process.env directly because + // @t3-oss/env-nextjs sometimes returns undefined for server vars + // that aren't listed in experimental__runtimeEnv. const resolvedEndpoint = - process.env.TELEMETRY_ENDPOINT || env.TELEMETRY_ENDPOINT || telemetryConfig.endpoint + process.env.OTEL_EXPORTER_OTLP_ENDPOINT || + process.env.TELEMETRY_ENDPOINT || + env.TELEMETRY_ENDPOINT || + telemetryConfig.endpoint telemetryConfig = { ...telemetryConfig, endpoint: resolvedEndpoint, @@ -187,9 +272,24 @@ async function initializeOpenTelemetry() { }, }) + // Parse OTEL_EXPORTER_OTLP_HEADERS per the OTel spec: comma- + // separated `key=value` pairs, values optionally URL-encoded. 
This + // is how managed backends (Honeycomb, Grafana Cloud, New Relic) + // receive their API keys without needing a vendor-specific code + // path — flip the secret, redeploy, traces land in the new place. + const otlpHeaders = parseOtlpHeadersEnv(process.env.OTEL_EXPORTER_OTLP_HEADERS || '') + + // The @opentelemetry/exporter-trace-otlp-http exporter treats the + // `url` option as the complete POST target and does NOT append the + // `/v1/traces` signal path. The Go SDK, by contrast, does append + // it when only a host is given. Normalize here so operators can + // set the same `OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io` + // for both services and have it Just Work. + const exporterUrl = normalizeOtlpTracesUrl(telemetryConfig.endpoint) + const exporter = new OTLPTraceExporter({ - url: telemetryConfig.endpoint, - headers: {}, + url: exporterUrl, + headers: otlpHeaders, timeoutMillis: Math.min(telemetryConfig.batchSettings.exportTimeoutMillis, 10000), keepAlive: false, }) @@ -244,14 +344,27 @@ async function initializeOpenTelemetry() { }) ) - // Dev / self-hosted OTLP backends (Jaeger/Tempo on localhost) should - // capture every trace so manual verification is deterministic. Keep 10% - // for production cloud endpoints. + // Sampling ratio resolution, in priority order: + // 1. `TELEMETRY_SAMPLING_RATIO` (our explicit, matches Go side) + // 2. `OTEL_TRACES_SAMPLER_ARG` (OTel spec env var) + // 3. 1.0 for local endpoints (so dev traces are deterministic) + // 4. 1.0 otherwise (production wants every mothership request — + // retention happens at the backend) + // + // `1.0` is the right default for mothership: every request is + // support-critical and we rely on the backend's retention (1 day + // in prod) to cap storage, not upstream sampling. const isLocalEndpoint = /localhost|127\.0\.0\.1/i.test(telemetryConfig.endpoint) - const samplingRatio = isLocalEndpoint ? 
1.0 : 0.1 + const samplingRatio = resolveSamplingRatio(isLocalEndpoint) const rootRatioSampler = new TraceIdRatioBasedSampler(samplingRatio) const sampler = createBusinessSpanSampler(rootRatioSampler) + logger.info('OpenTelemetry sampler configured', { + samplingRatio, + endpoint: telemetryConfig.endpoint, + origin: MOTHERSHIP_ORIGIN, + }) + // Order matters: the origin-prefix processor must run BEFORE the batch // processor so the renamed span and the mothership.origin attribute are // captured on export. From a54470e3cd8b121da4b5997e09c6a076f70fc19f Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 16 Apr 2026 18:16:27 -0700 Subject: [PATCH 05/10] update otel --- apps/sim/instrumentation-node.ts | 65 +++++++++--- apps/sim/lib/copilot/chat/post.ts | 32 +++++- apps/sim/lib/copilot/request/otel.ts | 152 ++++++++++++++++++++++++++- 3 files changed, 230 insertions(+), 19 deletions(-) diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index 1695eb00d11..d08ccdd1817 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -7,12 +7,21 @@ * OTel `service.name = "mothership"` so every request shows up as one * service in the OTLP backend. To keep the two halves distinguishable: * - * - Every span emitted by this process is prefixed with `sim: ` on - * start, and gets a `mothership.origin = "sim"` attribute. - * - The Go side does the same with `go: ` / `mothership.origin = "go"`. + * - Every span emitted by the mothership lifecycle on this process is + * prefixed with `sim-mothership: ` on start, and gets a + * `mothership.origin = "sim-mothership"` attribute. + * - The Go side does the same with `go-mothership: ` / + * `mothership.origin = "go-mothership"`. * - * So in Jaeger/Tempo, filtering by `mothership.origin` (exact) or by - * operation name prefix (`sim:` / `go:`) cleanly splits the two halves. 
+ * The `-mothership` suffix on the origin is deliberate: this Sim process + * hosts plenty of non-mothership code (workflow executor, block runtime, + * indexer clients) that may emit its own traces in the future. Making + * the origin value explicit means a later "sim" origin can't collide + * with the mothership side. + * + * So in any OTLP backend, filter by `mothership.origin` (exact) or by + * operation name prefix (`sim-mothership:` / `go-mothership:`) to + * cleanly split the two halves. */ import type { Attributes, Context, Link, SpanKind } from '@opentelemetry/api' @@ -31,9 +40,18 @@ diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) const logger = createLogger('OTelInstrumentation') -const MOTHERSHIP_ORIGIN = 'sim' as const +// Origin value lives on every mothership span as `mothership.origin`. +// Longer form intentionally used (vs. plain "sim") so non-mothership +// code running in this same Sim process can't collide if it later +// starts emitting its own traces. +const MOTHERSHIP_ORIGIN = 'sim-mothership' as const const SPAN_NAME_PREFIX = `${MOTHERSHIP_ORIGIN}: ` +// Short slug used only for `service.instance.id`. Kept as plain "sim" +// so the instance id reads as `mothership-sim` — concise, already +// scoped by `service.name = "mothership"` as the container. +const SERVICE_INSTANCE_SLUG = 'sim' as const + const DEFAULT_TELEMETRY_CONFIG = { endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces', // Joint Sim+Go service surface in Jaeger/Tempo. See header comment. @@ -147,18 +165,29 @@ function resolveSamplingRatio(isLocalEndpoint: boolean): number { } /** - * MothershipOriginSpanProcessor tags every span this process creates with - * `mothership.origin` and prepends a `sim: ` prefix to the span name on - * start, before any downstream processor (BatchSpanProcessor) reads it. 
+ * MothershipOriginSpanProcessor tags mothership-lifecycle spans with + * `mothership.origin` and prepends the origin prefix to the span name + * on start, before any downstream processor (BatchSpanProcessor) + * reads it. * - * Implemented as its own processor rather than a resource attribute so - * the backend span/operation list (which keys on span name) is visually - * split between sim and go even when both share service.name. + * Gated on `isBusinessSpan(name)` so only spans that already match + * the mothership allowlist get the label. The sampler drops + * non-mothership roots anyway, but keeping the tagger conditional + * means that if the sampler is ever relaxed (or a different + * instrumentation stream is added alongside mothership), unrelated + * spans won't accidentally inherit the mothership origin. + * + * Implemented as its own processor rather than a resource attribute + * so the backend span/operation list (which keys on span name) is + * visually split between sim and go even when both share service.name. */ class MothershipOriginSpanProcessor implements SpanProcessor { onStart(span: Span): void { - span.setAttribute('mothership.origin', MOTHERSHIP_ORIGIN) const name = span.name + if (!isBusinessSpan(name)) { + return + } + span.setAttribute('mothership.origin', MOTHERSHIP_ORIGIN) if (!name.startsWith(SPAN_NAME_PREFIX)) { span.updateName(`${SPAN_NAME_PREFIX}${name}`) } @@ -326,10 +355,12 @@ async function initializeOpenTelemetry() { // multi-second cross-machine clock drift within one group, and its // adjuster emits spurious "parent is not in the trace; skipping // clock skew adjustment" warnings on every cross-process child. - // Stable per-origin instance ID (`mothership-sim` / `mothership-go`) - // is enough to split the groups cleanly; Jaeger still shows both - // under the single `mothership` service in its service picker. 
- const serviceInstanceId = `${telemetryConfig.serviceName}-${MOTHERSHIP_ORIGIN}` + // Using the short slug (`sim` / `go`) keeps the instance id as + // `mothership-sim` / `mothership-go` — already scoped by + // `service.name = "mothership"` as the container. The longer + // `mothership.origin = "sim-mothership"` value does the + // disambiguation at the attribute level. + const serviceInstanceId = `${telemetryConfig.serviceName}-${SERVICE_INSTANCE_SLUG}` const resource = defaultResource().merge( resourceFromAttributes({ [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index 543c5b8f8d2..4a2903dad05 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -374,10 +374,34 @@ function buildOnComplete(params: { requestId: string workspaceId?: string notifyWorkspaceStatus: boolean + /** + * Root agent span for this request. When present, the final + * assistant message + invoked tool calls are recorded as + * `gen_ai.output.messages` on it before persistence runs. Keeps + * the Honeycomb Gen AI view complete across both the Sim root + * span and the Go-side `llm.stream` spans. 
+ */ + otelRoot?: { + setOutputMessages: (output: { + assistantText?: string + toolCalls?: Array<{ id: string; name: string; arguments?: Record }> + }) => void + } }) { - const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus } = params + const { chatId, userMessageId, requestId, workspaceId, notifyWorkspaceStatus, otelRoot } = params return async (result: OrchestratorResult) => { + if (otelRoot && result.success) { + otelRoot.setOutputMessages({ + assistantText: result.content, + toolCalls: result.toolCalls?.map((tc) => ({ + id: tc.id, + name: tc.name, + arguments: tc.params, + })), + }) + } + if (!chatId) return try { @@ -601,6 +625,11 @@ export async function handleUnifiedChatPost(req: NextRequest) { runId, transport: 'stream', }) + // Emit `gen_ai.input.messages` on the root agent span for OTel + // GenAI spec compliance (Honeycomb's Gen AI view keys off this). + // Gated on OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT + // internally — safe to always call. + otelRoot.setInputMessages({ userMessage: body.message }) // Wrap the rest of the handler so every nested withCopilotSpan / // withDbSpan (persistUserMessage, createRunSegment, resolveBranch DB @@ -799,6 +828,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { requestId: tracker.requestId, workspaceId, notifyWorkspaceStatus: branch.notifyWorkspaceStatus, + otelRoot, }), onError: buildOnError({ chatId: actualChatId, diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index 48d13784179..f4d57b51688 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -14,6 +14,136 @@ import type { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trac import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' +/** + * OTel GenAI experimental semantic conventions env var. 
When set to a + * truthy value, each `gen_ai.*` span carries the full input and + * output conversation content as attributes. Mirrors the Go-side + * gate in `copilot/internal/providers/telemetry.go` so operators + * control both halves with one variable. + * + * Spec: https://opentelemetry.io/docs/specs/semconv/gen-ai/ + */ +const GENAI_CAPTURE_ENV = 'OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT' + +/** + * Attribute-size cap for `gen_ai.{input,output}.messages`. Most OTLP + * backends reject attributes larger than ~64 KiB, so we truncate + * proactively to keep the rest of the span alive if a conversation + * runs long. Matches the Go-side cap to keep truncation behavior + * symmetrical between the two halves. + */ +const GENAI_MESSAGE_ATTR_MAX_BYTES = 60 * 1024 + +function isGenAIMessageCaptureEnabled(): boolean { + const raw = (process.env[GENAI_CAPTURE_ENV] || '').toLowerCase().trim() + return raw === 'true' || raw === '1' || raw === 'yes' +} + +/** + * Canonical OTel GenAI message shape used for both input and output + * attributes. Kept minimal — only the three part types we actually + * emit: `text`, `tool_call`, and `tool_call_response`. Adding more + * part types is cheap, but every additional shape here has to be + * mirrored in the Go serializer. + */ +interface GenAIAgentPart { + type: 'text' | 'tool_call' | 'tool_call_response' + content?: string + id?: string + name?: string + arguments?: Record + response?: string +} + +interface GenAIAgentMessage { + role: 'system' | 'user' | 'assistant' | 'tool' + parts: GenAIAgentPart[] +} + +function marshalAgentMessages(messages: GenAIAgentMessage[]): string | undefined { + if (messages.length === 0) return undefined + const json = JSON.stringify(messages) + if (json.length <= GENAI_MESSAGE_ATTR_MAX_BYTES) return json + // Simple tail-preserving truncation: drop from the front until we + // fit. Matches the Go side's behavior. The last message is + // usually the most diagnostic for span-level outcome. 
+ let remaining = messages.slice() + while (remaining.length > 1) { + remaining = remaining.slice(1) + const candidate = JSON.stringify(remaining) + if (candidate.length <= GENAI_MESSAGE_ATTR_MAX_BYTES) return candidate + } + // Single message still over cap — truncate the text part in place + // with a marker so the partial content is still readable. + const only = remaining[0] + for (const part of only.parts) { + if (part.type === 'text' && part.content) { + const headroom = GENAI_MESSAGE_ATTR_MAX_BYTES - 1024 + if (part.content.length > headroom) { + part.content = `${part.content.slice(0, headroom)}\n\n[truncated: capture cap ${GENAI_MESSAGE_ATTR_MAX_BYTES} bytes]` + } + } + } + const final = JSON.stringify([only]) + return final.length <= GENAI_MESSAGE_ATTR_MAX_BYTES ? final : undefined +} + +export interface CopilotAgentInputMessages { + userMessage?: string + systemPrompt?: string +} + +export interface CopilotAgentOutputMessages { + assistantText?: string + toolCalls?: Array<{ + id: string + name: string + arguments?: Record + }> +} + +function setAgentInputMessages(span: Span, input: CopilotAgentInputMessages): void { + if (!isGenAIMessageCaptureEnabled()) return + const messages: GenAIAgentMessage[] = [] + if (input.systemPrompt) { + messages.push({ + role: 'system', + parts: [{ type: 'text', content: input.systemPrompt }], + }) + } + if (input.userMessage) { + messages.push({ + role: 'user', + parts: [{ type: 'text', content: input.userMessage }], + }) + } + const serialized = marshalAgentMessages(messages) + if (serialized) { + span.setAttribute('gen_ai.input.messages', serialized) + } +} + +function setAgentOutputMessages(span: Span, output: CopilotAgentOutputMessages): void { + if (!isGenAIMessageCaptureEnabled()) return + const parts: GenAIAgentPart[] = [] + if (output.assistantText) { + parts.push({ type: 'text', content: output.assistantText }) + } + for (const tc of output.toolCalls ?? 
[]) { + parts.push({ + type: 'tool_call', + id: tc.id, + name: tc.name, + ...(tc.arguments ? { arguments: tc.arguments } : {}), + }) + } + if (parts.length === 0) return + const serialized = marshalAgentMessages([{ role: 'assistant', parts }]) + if (serialized) { + span.setAttribute('gen_ai.output.messages', serialized) + } +} + /** * Reuse the generated RequestTraceV1Outcome string values for every * lifecycle outcome field. This keeps our OTel attributes, internal @@ -262,6 +392,20 @@ export interface CopilotOtelRoot { span: Span context: Context finish: (outcome?: CopilotLifecycleOutcome, error?: unknown) => void + /** + * Record `gen_ai.input.messages` on the root agent span. Gated on + * `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` — no-op when + * capture is disabled. Safe to call multiple times; the latest + * call wins. + */ + setInputMessages: (input: CopilotAgentInputMessages) => void + /** + * Record `gen_ai.output.messages` on the root agent span. Gated on + * the same env var as `setInputMessages`. Typically called from the + * stream finalize callback once the assistant's final content and + * invoked tool calls are known. 
+ */ + setOutputMessages: (output: CopilotAgentOutputMessages) => void } export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { @@ -300,7 +444,13 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { span.end() } - return { span, context: rootContext, finish } + return { + span, + context: rootContext, + finish, + setInputMessages: (input) => setAgentInputMessages(span, input), + setOutputMessages: (output) => setAgentOutputMessages(span, output), + } } export async function withCopilotOtelContext( From c2d9dd95fae73c50077d62852fcc5a84a2ec81a1 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 17 Apr 2026 14:23:12 -0700 Subject: [PATCH 06/10] Otel --- apps/sim/app/api/billing/update-cost/route.ts | 45 +- .../api/copilot/api-keys/generate/route.ts | 3 +- apps/sim/app/api/copilot/api-keys/route.ts | 5 +- .../api/copilot/api-keys/validate/route.ts | 33 +- .../api/copilot/auto-allowed-tools/route.ts | 7 +- apps/sim/app/api/copilot/chat/abort/route.ts | 209 ++-- apps/sim/app/api/copilot/chat/stop/route.ts | 170 +-- apps/sim/app/api/copilot/chat/stream/route.ts | 27 +- apps/sim/app/api/copilot/confirm/route.ts | 194 ++-- apps/sim/instrumentation-node.ts | 42 +- apps/sim/lib/copilot/async-runs/repository.ts | 7 +- apps/sim/lib/copilot/chat/post.ts | 223 ++-- apps/sim/lib/copilot/chat/terminal-state.ts | 7 +- .../copilot/generated/trace-attributes-v1.ts | 968 ++++++++++++++++++ .../lib/copilot/generated/trace-spans-v1.ts | 38 + apps/sim/lib/copilot/request/go/fetch.ts | 21 +- apps/sim/lib/copilot/request/go/stream.ts | 5 +- apps/sim/lib/copilot/request/handlers/tool.ts | 74 +- .../lib/copilot/request/lifecycle/finalize.ts | 13 +- .../lib/copilot/request/lifecycle/start.ts | 15 +- apps/sim/lib/copilot/request/otel.ts | 161 ++- apps/sim/lib/copilot/request/session/abort.ts | 116 ++- .../copilot/request/session/explicit-abort.ts | 3 +- .../lib/copilot/request/session/recovery.ts | 9 +- 
apps/sim/lib/copilot/request/subagent.ts | 13 +- .../sim/lib/copilot/request/tools/executor.ts | 8 +- apps/sim/lib/copilot/request/tools/files.ts | 13 +- .../lib/copilot/request/tools/resources.ts | 13 +- apps/sim/lib/copilot/request/tools/tables.ts | 39 +- apps/sim/lib/copilot/request/trace.ts | 5 +- apps/sim/lib/copilot/vfs/file-reader.ts | 95 +- apps/sim/lib/core/telemetry.ts | 19 +- package.json | 6 +- scripts/sync-trace-attributes-contract.ts | 168 +++ 34 files changed, 2197 insertions(+), 577 deletions(-) create mode 100644 apps/sim/lib/copilot/generated/trace-attributes-v1.ts create mode 100644 scripts/sync-trace-attributes-contract.ts diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 6147f27e607..4799b485221 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -4,6 +4,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { recordUsage } from '@/lib/billing/core/usage-log' import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' @@ -59,8 +60,8 @@ async function updateCostInner( logger.info(`[${requestId}] Update cost request started`) if (!isBillingEnabled) { - span.setAttribute('billing.outcome', 'billing_disabled') - span.setAttribute('http.status_code', 200) + span.setAttribute(TraceAttr.BillingOutcome, 'billing_disabled') + span.setAttribute(TraceAttr.HttpStatusCode, 200) return NextResponse.json({ success: true, message: 'Billing disabled, cost update skipped', @@ -76,8 +77,8 @@ async function updateCostInner( const authResult = checkInternalApiKey(req) if (!authResult.success) { 
logger.warn(`[${requestId}] Authentication failed: ${authResult.error}`) - span.setAttribute('billing.outcome', 'auth_failed') - span.setAttribute('http.status_code', 401) + span.setAttribute(TraceAttr.BillingOutcome, 'auth_failed') + span.setAttribute(TraceAttr.HttpStatusCode, 401) return NextResponse.json( { success: false, @@ -95,8 +96,8 @@ async function updateCostInner( errors: validation.error.issues, body, }) - span.setAttribute('billing.outcome', 'invalid_body') - span.setAttribute('http.status_code', 400) + span.setAttribute(TraceAttr.BillingOutcome, 'invalid_body') + span.setAttribute(TraceAttr.HttpStatusCode, 400) return NextResponse.json( { success: false, @@ -112,14 +113,14 @@ async function updateCostInner( const isMcp = source === 'mcp_copilot' span.setAttributes({ - 'user.id': userId, - 'gen_ai.request.model': model, - 'billing.source': source, - 'billing.cost_usd': cost, - 'gen_ai.usage.input_tokens': inputTokens, - 'gen_ai.usage.output_tokens': outputTokens, - 'billing.is_mcp': isMcp, - ...(idempotencyKey ? { 'billing.idempotency_key': idempotencyKey } : {}), + [TraceAttr.UserId]: userId, + [TraceAttr.GenAiRequestModel]: model, + [TraceAttr.BillingSource]: source, + [TraceAttr.BillingCostUsd]: cost, + [TraceAttr.GenAiUsageInputTokens]: inputTokens, + [TraceAttr.GenAiUsageOutputTokens]: outputTokens, + [TraceAttr.BillingIsMcp]: isMcp, + ...(idempotencyKey ? 
{ [TraceAttr.BillingIdempotencyKey]: idempotencyKey } : {}), }) claim = idempotencyKey @@ -132,8 +133,8 @@ async function updateCostInner( userId, source, }) - span.setAttribute('billing.outcome', 'duplicate_idempotency_key') - span.setAttribute('http.status_code', 409) + span.setAttribute(TraceAttr.BillingOutcome, 'duplicate_idempotency_key') + span.setAttribute(TraceAttr.HttpStatusCode, 409) return NextResponse.json( { success: false, @@ -198,9 +199,9 @@ async function updateCostInner( cost, }) - span.setAttribute('billing.outcome', 'billed') - span.setAttribute('http.status_code', 200) - span.setAttribute('billing.duration_ms', duration) + span.setAttribute(TraceAttr.BillingOutcome, 'billed') + span.setAttribute(TraceAttr.HttpStatusCode, 200) + span.setAttribute(TraceAttr.BillingDurationMs, duration) return NextResponse.json({ success: true, data: { @@ -235,9 +236,9 @@ async function updateCostInner( ) } - span.setAttribute('billing.outcome', 'internal_error') - span.setAttribute('http.status_code', 500) - span.setAttribute('billing.duration_ms', duration) + span.setAttribute(TraceAttr.BillingOutcome, 'internal_error') + span.setAttribute(TraceAttr.HttpStatusCode, 500) + span.setAttribute(TraceAttr.BillingDurationMs, duration) return NextResponse.json( { success: false, diff --git a/apps/sim/app/api/copilot/api-keys/generate/route.ts b/apps/sim/app/api/copilot/api-keys/generate/route.ts index 66febeed24f..950bb9f16cb 100644 --- a/apps/sim/app/api/copilot/api-keys/generate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/generate/route.ts @@ -2,6 +2,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' @@ -42,7 +43,7 @@ export async function POST(req: 
NextRequest) { body: JSON.stringify({ userId, name }), spanName: 'sim → go /api/validate-key/generate', operation: 'generate_api_key', - attributes: { 'user.id': userId }, + attributes: { [TraceAttr.UserId]: userId }, }) if (!res.ok) { diff --git a/apps/sim/app/api/copilot/api-keys/route.ts b/apps/sim/app/api/copilot/api-keys/route.ts index 4b4d5862ba5..89a88d381dc 100644 --- a/apps/sim/app/api/copilot/api-keys/route.ts +++ b/apps/sim/app/api/copilot/api-keys/route.ts @@ -1,6 +1,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' @@ -22,7 +23,7 @@ export async function GET(request: NextRequest) { body: JSON.stringify({ userId }), spanName: 'sim → go /api/validate-key/get-api-keys', operation: 'get_api_keys', - attributes: { 'user.id': userId }, + attributes: { [TraceAttr.UserId]: userId }, }) if (!res.ok) { @@ -79,7 +80,7 @@ export async function DELETE(request: NextRequest) { body: JSON.stringify({ userId, apiKeyId: id }), spanName: 'sim → go /api/validate-key/delete', operation: 'delete_api_key', - attributes: { 'user.id': userId, 'api_key.id': id }, + attributes: { [TraceAttr.UserId]: userId, [TraceAttr.ApiKeyId]: id }, }) if (!res.ok) { diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index 286ab7ff9ec..6852881c570 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -5,6 +5,7 @@ import { eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor' +import { TraceAttr } from 
'@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' @@ -31,8 +32,8 @@ export async function POST(req: NextRequest) { try { const auth = checkInternalApiKey(req) if (!auth.success) { - span.setAttribute('copilot.validate.outcome', 'internal_auth_failed') - span.setAttribute('http.status_code', 401) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'internal_auth_failed') + span.setAttribute(TraceAttr.HttpStatusCode, 401) return new NextResponse(null, { status: 401 }) } @@ -40,8 +41,8 @@ export async function POST(req: NextRequest) { const validationResult = ValidateApiKeySchema.safeParse(body) if (!validationResult.success) { logger.warn('Invalid validation request', { errors: validationResult.error.errors }) - span.setAttribute('copilot.validate.outcome', 'invalid_body') - span.setAttribute('http.status_code', 400) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'invalid_body') + span.setAttribute(TraceAttr.HttpStatusCode, 400) return NextResponse.json( { error: 'userId is required', @@ -52,22 +53,22 @@ export async function POST(req: NextRequest) { } const { userId } = validationResult.data - span.setAttribute('user.id', userId) + span.setAttribute(TraceAttr.UserId, userId) const [existingUser] = await db.select().from(user).where(eq(user.id, userId)).limit(1) if (!existingUser) { logger.warn('[API VALIDATION] userId does not exist', { userId }) - span.setAttribute('copilot.validate.outcome', 'user_not_found') - span.setAttribute('http.status_code', 403) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'user_not_found') + span.setAttribute(TraceAttr.HttpStatusCode, 403) return NextResponse.json({ error: 'User not found' }, { status: 403 }) } logger.info('[API VALIDATION] Validating usage limit', { userId }) const { isExceeded, currentUsage, limit } = await 
checkServerSideUsageLimits(userId) span.setAttributes({ - 'billing.usage.current': currentUsage, - 'billing.usage.limit': limit, - 'billing.usage.exceeded': isExceeded, + [TraceAttr.BillingUsageCurrent]: currentUsage, + [TraceAttr.BillingUsageLimit]: limit, + [TraceAttr.BillingUsageExceeded]: isExceeded, }) logger.info('[API VALIDATION] Usage limit validated', { @@ -79,18 +80,18 @@ export async function POST(req: NextRequest) { if (isExceeded) { logger.info('[API VALIDATION] Usage exceeded', { userId, currentUsage, limit }) - span.setAttribute('copilot.validate.outcome', 'usage_exceeded') - span.setAttribute('http.status_code', 402) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'usage_exceeded') + span.setAttribute(TraceAttr.HttpStatusCode, 402) return new NextResponse(null, { status: 402 }) } - span.setAttribute('copilot.validate.outcome', 'ok') - span.setAttribute('http.status_code', 200) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'ok') + span.setAttribute(TraceAttr.HttpStatusCode, 200) return new NextResponse(null, { status: 200 }) } catch (error) { logger.error('Error validating usage limit', { error }) - span.setAttribute('copilot.validate.outcome', 'internal_error') - span.setAttribute('http.status_code', 500) + span.setAttribute(TraceAttr.CopilotValidateOutcome, 'internal_error') + span.setAttribute(TraceAttr.HttpStatusCode, 500) return NextResponse.json({ error: 'Failed to validate usage' }, { status: 500 }) } } diff --git a/apps/sim/app/api/copilot/auto-allowed-tools/route.ts b/apps/sim/app/api/copilot/auto-allowed-tools/route.ts index e02262d83ef..a00fd80e5bc 100644 --- a/apps/sim/app/api/copilot/auto-allowed-tools/route.ts +++ b/apps/sim/app/api/copilot/auto-allowed-tools/route.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { TraceAttr } from 
'@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' @@ -38,7 +39,7 @@ export async function GET() { headers: copilotHeaders(), spanName: 'sim → go /api/tool-preferences/auto-allowed', operation: 'list_auto_allowed_tools', - attributes: { 'user.id': userId }, + attributes: { [TraceAttr.UserId]: userId }, } ) @@ -79,7 +80,7 @@ export async function POST(request: NextRequest) { body: JSON.stringify({ userId, toolId: body.toolId }), spanName: 'sim → go /api/tool-preferences/auto-allowed', operation: 'add_auto_allowed_tool', - attributes: { 'user.id': userId, 'tool.id': body.toolId }, + attributes: { [TraceAttr.UserId]: userId, [TraceAttr.ToolId]: body.toolId }, }) if (!res.ok) { @@ -124,7 +125,7 @@ export async function DELETE(request: NextRequest) { headers: copilotHeaders(), spanName: 'sim → go /api/tool-preferences/auto-allowed', operation: 'remove_auto_allowed_tool', - attributes: { 'user.id': userId, 'tool.id': toolId }, + attributes: { [TraceAttr.UserId]: userId, [TraceAttr.ToolId]: toolId }, } ) diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts b/apps/sim/app/api/copilot/chat/abort/route.ts index 4b836be8022..46c003c1a9e 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -2,8 +2,11 @@ import { createLogger } from '@sim/logger' import { NextResponse } from 'next/server' import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' +import { withCopilotSpan, withIncomingGoSpan } from '@/lib/copilot/request/otel' import { abortActiveStream, 
waitForPendingChatStream } from '@/lib/copilot/request/session' import { env } from '@/lib/core/config/env' @@ -11,87 +14,143 @@ const logger = createLogger('CopilotChatAbortAPI') const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000 const STREAM_ABORT_SETTLE_TIMEOUT_MS = 8000 +/** + * POST /api/copilot/chat/abort + * + * Hang-critical: the client calls this when the user hits "stop". It + * fans out to Go (explicit-abort marker) and then waits up to + * STREAM_ABORT_SETTLE_TIMEOUT_MS (8s) for the prior chat stream to + * unwind. If EITHER the Go fetch or the settle-wait hangs, the user + * sees a "still shutting down" 409 — or worse, an unresolved Promise + * on the client. The spans below pinpoint which phase stalled. + */ export async function POST(request: Request) { - const { userId: authenticatedUserId, isAuthenticated } = - await authenticateCopilotRequestSessionOnly() + return withIncomingGoSpan( + request.headers, + TraceSpan.CopilotChatAbortStream, + undefined, + async (rootSpan) => { + const { userId: authenticatedUserId, isAuthenticated } = + await authenticateCopilotRequestSessionOnly() - if (!isAuthenticated || !authenticatedUserId) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) - } + if (!isAuthenticated || !authenticatedUserId) { + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'unauthorized') + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } - const body = await request.json().catch((err) => { - logger.warn('Abort request body parse failed; continuing with empty object', { - error: err instanceof Error ? err.message : String(err), - }) - return {} - }) - const streamId = typeof body.streamId === 'string' ? body.streamId : '' - let chatId = typeof body.chatId === 'string' ? 
body.chatId : '' - - if (!streamId) { - return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) - } + const body = await request.json().catch((err) => { + logger.warn('Abort request body parse failed; continuing with empty object', { + error: err instanceof Error ? err.message : String(err), + }) + return {} + }) + const streamId = typeof body.streamId === 'string' ? body.streamId : '' + let chatId = typeof body.chatId === 'string' ? body.chatId : '' - if (!chatId) { - const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => { - logger.warn('getLatestRunForStream failed while resolving chatId for abort', { - streamId, - error: err instanceof Error ? err.message : String(err), + if (!streamId) { + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'missing_stream_id') + return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) + } + rootSpan.setAttributes({ + [TraceAttr.StreamId]: streamId, + [TraceAttr.UserId]: authenticatedUserId, }) - return null - }) - if (run?.chatId) { - chatId = run.chatId - } - } - try { - const headers: Record = { 'Content-Type': 'application/json' } - if (env.COPILOT_API_KEY) { - headers['x-api-key'] = env.COPILOT_API_KEY - } - const controller = new AbortController() - const timeout = setTimeout( - () => controller.abort('timeout:go_explicit_abort_fetch'), - GO_EXPLICIT_ABORT_TIMEOUT_MS - ) - const response = await fetchGo(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { - method: 'POST', - headers, - signal: controller.signal, - body: JSON.stringify({ - messageId: streamId, - userId: authenticatedUserId, - ...(chatId ? { chatId } : {}), - }), - spanName: 'sim → go /api/streams/explicit-abort', - operation: 'explicit_abort', - attributes: { - 'copilot.stream.id': streamId, - ...(chatId ? 
{ 'chat.id': chatId } : {}), - }, - }).finally(() => clearTimeout(timeout)) - if (!response.ok) { - throw new Error(`Explicit abort marker request failed: ${response.status}`) - } - } catch (err) { - logger.warn('Explicit abort marker request failed; proceeding with local abort', { - streamId, - error: err instanceof Error ? err.message : String(err), - }) - } + if (!chatId) { + const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => { + logger.warn('getLatestRunForStream failed while resolving chatId for abort', { + streamId, + error: err instanceof Error ? err.message : String(err), + }) + return null + }) + if (run?.chatId) { + chatId = run.chatId + } + } + if (chatId) rootSpan.setAttribute(TraceAttr.ChatId, chatId) - const aborted = await abortActiveStream(streamId) - if (chatId) { - const settled = await waitForPendingChatStream(chatId, STREAM_ABORT_SETTLE_TIMEOUT_MS, streamId) - if (!settled) { - return NextResponse.json( - { error: 'Previous response is still shutting down', aborted, settled: false }, - { status: 409 } - ) - } - return NextResponse.json({ aborted, settled: true }) - } + let goAbortOk = false + try { + const headers: Record = { 'Content-Type': 'application/json' } + if (env.COPILOT_API_KEY) { + headers['x-api-key'] = env.COPILOT_API_KEY + } + const controller = new AbortController() + const timeout = setTimeout( + () => controller.abort('timeout:go_explicit_abort_fetch'), + GO_EXPLICIT_ABORT_TIMEOUT_MS + ) + const response = await fetchGo(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { + method: 'POST', + headers, + signal: controller.signal, + body: JSON.stringify({ + messageId: streamId, + userId: authenticatedUserId, + ...(chatId ? { chatId } : {}), + }), + spanName: 'sim → go /api/streams/explicit-abort', + operation: 'explicit_abort', + attributes: { + 'copilot.stream.id': streamId, + ...(chatId ? 
{ [TraceAttr.ChatId]: chatId } : {}), + }, + }).finally(() => clearTimeout(timeout)) + if (!response.ok) { + throw new Error(`Explicit abort marker request failed: ${response.status}`) + } + goAbortOk = true + } catch (err) { + logger.warn('Explicit abort marker request failed; proceeding with local abort', { + streamId, + error: err instanceof Error ? err.message : String(err), + }) + } + rootSpan.setAttribute(TraceAttr.CopilotAbortGoMarkerOk, goAbortOk) + + const aborted = await abortActiveStream(streamId) + rootSpan.setAttribute(TraceAttr.CopilotAbortLocalAborted, aborted) - return NextResponse.json({ aborted }) + if (chatId) { + // `waitForPendingChatStream` blocks up to 8s waiting for the + // prior stream's release. It's THE single most likely stall + // point in this handler — isolate it so a slow unwind shows up + // as this child span rather than unexplained root latency. + const settled = await withCopilotSpan( + TraceSpan.CopilotChatAbortWaitSettle, + { + 'chat.id': chatId, + 'stream.id': streamId, + 'settle.timeout_ms': STREAM_ABORT_SETTLE_TIMEOUT_MS, + }, + async (settleSpan) => { + const start = Date.now() + const ok = await waitForPendingChatStream( + chatId, + STREAM_ABORT_SETTLE_TIMEOUT_MS, + streamId + ) + settleSpan.setAttributes({ + [TraceAttr.SettleWaitMs]: Date.now() - start, + [TraceAttr.SettleCompleted]: ok, + }) + return ok + } + ) + if (!settled) { + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'settle_timeout') + return NextResponse.json( + { error: 'Previous response is still shutting down', aborted, settled: false }, + { status: 409 } + ) + } + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'settled') + return NextResponse.json({ aborted, settled: true }) + } + + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'no_chat_id') + return NextResponse.json({ aborted }) + } + ) } diff --git a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index 05e5935aa40..71708838ff9 100644 --- 
a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -6,6 +6,9 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { taskPubSub } from '@/lib/copilot/tasks' import { generateId } from '@/lib/core/utils/uuid' @@ -62,87 +65,112 @@ const StopSchema = z.object({ * Clears conversationId so the server-side onComplete won't duplicate the message. * The chat stream lock is intentionally left alone here; it is released only once * the aborted server stream actually unwinds. + * + * Hang-critical: runs a DB SELECT + UPDATE + pubsub publish. A slow DB + * here makes the UI look frozen after the user clicks Stop. The root + * span lets us tell whether stalls are DB-bound or pubsub-bound. 
*/ export async function POST(req: NextRequest) { - try { - const session = await getSession() - if (!session?.user?.id) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) - } + return withIncomingGoSpan( + req.headers, + TraceSpan.CopilotChatStopStream, + undefined, + async (span) => { + try { + const session = await getSession() + if (!session?.user?.id) { + span.setAttribute(TraceAttr.CopilotStopOutcome, 'unauthorized') + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } - const { chatId, streamId, content, contentBlocks } = StopSchema.parse(await req.json()) - const [row] = await db - .select({ - workspaceId: copilotChats.workspaceId, - messages: copilotChats.messages, - }) - .from(copilotChats) - .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, session.user.id))) - .limit(1) + const { chatId, streamId, content, contentBlocks } = StopSchema.parse(await req.json()) + span.setAttributes({ + [TraceAttr.ChatId]: chatId, + [TraceAttr.StreamId]: streamId, + [TraceAttr.UserId]: session.user.id, + [TraceAttr.CopilotStopContentLength]: content.length, + [TraceAttr.CopilotStopBlocksCount]: contentBlocks?.length ?? 0, + }) - if (!row) { - return NextResponse.json({ success: true }) - } + const [row] = await db + .select({ + workspaceId: copilotChats.workspaceId, + messages: copilotChats.messages, + }) + .from(copilotChats) + .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, session.user.id))) + .limit(1) - const messages: Record[] = Array.isArray(row.messages) ? 
row.messages : [] - const userIdx = messages.findIndex((message) => message.id === streamId) - const alreadyHasResponse = - userIdx >= 0 && - userIdx + 1 < messages.length && - (messages[userIdx + 1] as Record)?.role === 'assistant' - const canAppendAssistant = - userIdx >= 0 && userIdx === messages.length - 1 && !alreadyHasResponse + if (!row) { + span.setAttribute(TraceAttr.CopilotStopOutcome, 'chat_not_found') + return NextResponse.json({ success: true }) + } - const updateWhere = and( - eq(copilotChats.id, chatId), - eq(copilotChats.userId, session.user.id), - eq(copilotChats.conversationId, streamId) - ) + const messages: Record[] = Array.isArray(row.messages) ? row.messages : [] + const userIdx = messages.findIndex((message) => message.id === streamId) + const alreadyHasResponse = + userIdx >= 0 && + userIdx + 1 < messages.length && + (messages[userIdx + 1] as Record)?.role === 'assistant' + const canAppendAssistant = + userIdx >= 0 && userIdx === messages.length - 1 && !alreadyHasResponse - const setClause: Record = { - conversationId: null, - updatedAt: new Date(), - } + const updateWhere = and( + eq(copilotChats.id, chatId), + eq(copilotChats.userId, session.user.id), + eq(copilotChats.conversationId, streamId) + ) - const hasContent = content.trim().length > 0 - const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0 - const synthesizedStoppedBlocks = hasBlocks - ? contentBlocks - : hasContent - ? 
[{ type: 'text', channel: 'assistant', content }, { type: 'stopped' }] - : [{ type: 'stopped' }] - if (canAppendAssistant) { - const normalized = normalizeMessage({ - id: generateId(), - role: 'assistant', - content, - timestamp: new Date().toISOString(), - contentBlocks: synthesizedStoppedBlocks, - }) - const assistantMessage: PersistedMessage = normalized - setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb` - } + const setClause: Record = { + conversationId: null, + updatedAt: new Date(), + } - const [updated] = await db - .update(copilotChats) - .set(setClause) - .where(updateWhere) - .returning({ workspaceId: copilotChats.workspaceId }) + const hasContent = content.trim().length > 0 + const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0 + const synthesizedStoppedBlocks = hasBlocks + ? contentBlocks + : hasContent + ? [{ type: 'text', channel: 'assistant', content }, { type: 'stopped' }] + : [{ type: 'stopped' }] + if (canAppendAssistant) { + const normalized = normalizeMessage({ + id: generateId(), + role: 'assistant', + content, + timestamp: new Date().toISOString(), + contentBlocks: synthesizedStoppedBlocks, + }) + const assistantMessage: PersistedMessage = normalized + setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb` + } + span.setAttribute(TraceAttr.CopilotStopAppendedAssistant, canAppendAssistant) - if (updated?.workspaceId) { - taskPubSub?.publishStatusChanged({ - workspaceId: updated.workspaceId, - chatId, - type: 'completed', - }) - } + const [updated] = await db + .update(copilotChats) + .set(setClause) + .where(updateWhere) + .returning({ workspaceId: copilotChats.workspaceId }) + + if (updated?.workspaceId) { + taskPubSub?.publishStatusChanged({ + workspaceId: updated.workspaceId, + chatId, + type: 'completed', + }) + } - return NextResponse.json({ success: true }) - } catch (error) { - if (error instanceof z.ZodError) { - return 
NextResponse.json({ error: 'Invalid request' }, { status: 400 }) + span.setAttribute(TraceAttr.CopilotStopOutcome, updated ? 'persisted' : 'no_matching_row') + return NextResponse.json({ success: true }) + } catch (error) { + if (error instanceof z.ZodError) { + span.setAttribute(TraceAttr.CopilotStopOutcome, 'validation_error') + return NextResponse.json({ error: 'Invalid request' }, { status: 400 }) + } + logger.error('Error stopping chat stream:', error) + span.setAttribute(TraceAttr.CopilotStopOutcome, 'internal_error') + return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) + } } - logger.error('Error stopping chat stream:', error) - return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) - } + ) } diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index 54502269f15..47ae09984f0 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -6,6 +6,7 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' import { getCopilotTracer } from '@/lib/copilot/request/otel' import { @@ -126,10 +127,10 @@ export async function GET(request: NextRequest) { // attaches to this root. const rootSpan = getCopilotTracer().startSpan('copilot.resume.request', { attributes: { - 'copilot.transport': batchMode ? 'batch' : 'stream', - 'stream.id': streamId, - 'user.id': authenticatedUserId, - 'copilot.resume.after_cursor': afterCursor || '0', + [TraceAttr.CopilotTransport]: batchMode ? 
'batch' : 'stream', + [TraceAttr.StreamId]: streamId, + [TraceAttr.UserId]: authenticatedUserId, + [TraceAttr.CopilotResumeAfterCursor]: afterCursor || '0', }, }) const rootContext = trace.setSpan(otelContext.active(), rootSpan) @@ -189,11 +190,11 @@ async function handleResumeRequestBody({ runStatus: run?.status, }) if (!run) { - rootSpan.setAttribute('copilot.resume.outcome', 'stream_not_found') + rootSpan.setAttribute(TraceAttr.CopilotResumeOutcome, 'stream_not_found') rootSpan.end() return NextResponse.json({ error: 'Stream not found' }, { status: 404 }) } - rootSpan.setAttribute('copilot.run.status', run.status) + rootSpan.setAttribute(TraceAttr.CopilotRunStatus, run.status) if (batchMode) { const afterSeq = afterCursor || '0' @@ -216,9 +217,9 @@ async function handleResumeRequestBody({ runStatus: run.status, }) rootSpan.setAttributes({ - 'copilot.resume.outcome': 'batch_delivered', - 'copilot.resume.event_count': batchEvents.length, - 'copilot.resume.preview_session_count': previewSessions.length, + [TraceAttr.CopilotResumeOutcome]: 'batch_delivered', + [TraceAttr.CopilotResumeEventCount]: batchEvents.length, + [TraceAttr.CopilotResumePreviewSessionCount]: previewSessions.length, }) rootSpan.end() return NextResponse.json({ @@ -409,14 +410,14 @@ async function handleResumeRequestBody({ request.signal.removeEventListener('abort', abortListener) closeController() rootSpan.setAttributes({ - 'copilot.resume.outcome': sawTerminalEvent + [TraceAttr.CopilotResumeOutcome]: sawTerminalEvent ? 'terminal_delivered' : controllerClosed ? 
'client_disconnected' : 'ended_without_terminal', - 'copilot.resume.event_count': totalEventsFlushed, - 'copilot.resume.poll_iterations': pollIterations, - 'copilot.resume.duration_ms': Date.now() - startTime, + [TraceAttr.CopilotResumeEventCount]: totalEventsFlushed, + [TraceAttr.CopilotResumePollIterations]: pollIterations, + [TraceAttr.CopilotResumeDurationMs]: Date.now() - startTime, }) rootSpan.end() } diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index 83aea100f6b..a8734d74d2b 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -13,6 +13,8 @@ import { getRunSegment, upsertAsyncToolCall, } from '@/lib/copilot/async-runs/repository' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm' import { authenticateCopilotRequestSessionOnly, @@ -22,6 +24,7 @@ import { createRequestTracker, createUnauthorizedResponse, } from '@/lib/copilot/request/http' +import { withIncomingGoSpan } from '@/lib/copilot/request/otel' const logger = createLogger('CopilotConfirmAPI') @@ -115,90 +118,119 @@ async function updateToolCallStatus( /** * POST /api/copilot/confirm * Accept client tool completion or detach confirmations. + * + * Hang-critical: this is the delivery path for client-executed tool + * results. If this handler stalls (DB lock, Redis timeout, pubsub + * failure), the `copilot.tool.wait_for_client_result` span on the + * originating chat stream never resolves and the whole request looks + * hung. The root span here gives us per-request visibility so we can + * correlate a slow confirm with the chat-stream that was waiting on it + * via `toolCallId`. 
*/ export async function POST(req: NextRequest) { const tracker = createRequestTracker() - try { - // Authenticate user using consolidated helper - const { userId: authenticatedUserId, isAuthenticated } = - await authenticateCopilotRequestSessionOnly() - - if (!isAuthenticated) { - return createUnauthorizedResponse() - } - - const body = await req.json() - const { toolCallId, status, message, data } = ConfirmationSchema.parse(body) - const existing = await getAsyncToolCall(toolCallId).catch((err) => { - logger.warn('Failed to fetch async tool call', { - toolCallId, - error: err instanceof Error ? err.message : String(err), - }) - return null - }) - - if (!existing) { - return createNotFoundResponse('Tool call not found') + return withIncomingGoSpan( + req.headers, + TraceSpan.CopilotConfirmToolResult, + { 'request.id': tracker.requestId }, + async (span) => { + try { + const { userId: authenticatedUserId, isAuthenticated } = + await authenticateCopilotRequestSessionOnly() + + if (!isAuthenticated || !authenticatedUserId) { + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'unauthorized') + return createUnauthorizedResponse() + } + + const body = await req.json() + const { toolCallId, status, message, data } = ConfirmationSchema.parse(body) + span.setAttributes({ + [TraceAttr.ToolCallId]: toolCallId, + [TraceAttr.ToolConfirmationStatus]: status, + [TraceAttr.UserId]: authenticatedUserId, + }) + + const existing = await getAsyncToolCall(toolCallId).catch((err) => { + logger.warn('Failed to fetch async tool call', { + toolCallId, + error: err instanceof Error ? 
err.message : String(err), + }) + return null + }) + + if (!existing) { + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'tool_call_not_found') + return createNotFoundResponse('Tool call not found') + } + if (existing.toolName) span.setAttribute(TraceAttr.ToolName, existing.toolName) + if (existing.runId) span.setAttribute(TraceAttr.RunId, existing.runId) + + const run = await getRunSegment(existing.runId).catch((err) => { + logger.warn('Failed to fetch run segment', { + runId: existing.runId, + error: err instanceof Error ? err.message : String(err), + }) + return null + }) + if (!run) { + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'run_not_found') + return createNotFoundResponse('Tool call run not found') + } + if (run.userId !== authenticatedUserId) { + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'forbidden') + return NextResponse.json({ error: 'Forbidden' }, { status: 403 }) + } + + const updated = await updateToolCallStatus(existing, status, message, data) + + if (!updated) { + logger.error(`[${tracker.requestId}] Failed to update tool call status`, { + userId: authenticatedUserId, + toolCallId, + status, + internalStatus: status, + message, + }) + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'update_failed') + return createBadRequestResponse( + 'Failed to update tool call status or tool call not found' + ) + } + + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'delivered') + return NextResponse.json({ + success: true, + message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`, + toolCallId, + status, + }) + } catch (error) { + const duration = tracker.getDuration() + + if (error instanceof z.ZodError) { + logger.error(`[${tracker.requestId}] Request validation error:`, { + duration, + errors: error.errors, + }) + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'validation_error') + return createBadRequestResponse( + `Invalid request data: ${error.errors.map((e) => e.message).join(', ')}` + ) + } + + 
logger.error(`[${tracker.requestId}] Unexpected error:`, { + duration, + error: error instanceof Error ? error.message : 'Unknown error', + stack: error instanceof Error ? error.stack : undefined, + }) + + span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'internal_error') + return createInternalServerErrorResponse( + error instanceof Error ? error.message : 'Internal server error' + ) + } } - - const run = await getRunSegment(existing.runId).catch((err) => { - logger.warn('Failed to fetch run segment', { - runId: existing.runId, - error: err instanceof Error ? err.message : String(err), - }) - return null - }) - if (!run) { - return createNotFoundResponse('Tool call run not found') - } - if (run.userId !== authenticatedUserId) { - return NextResponse.json({ error: 'Forbidden' }, { status: 403 }) - } - - // Update the durable tool call status and wake any waiters. - const updated = await updateToolCallStatus(existing, status, message, data) - - if (!updated) { - logger.error(`[${tracker.requestId}] Failed to update tool call status`, { - userId: authenticatedUserId, - toolCallId, - status, - internalStatus: status, - message, - }) - return createBadRequestResponse('Failed to update tool call status or tool call not found') - } - - const duration = tracker.getDuration() - - return NextResponse.json({ - success: true, - message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`, - toolCallId, - status, - }) - } catch (error) { - const duration = tracker.getDuration() - - if (error instanceof z.ZodError) { - logger.error(`[${tracker.requestId}] Request validation error:`, { - duration, - errors: error.errors, - }) - return createBadRequestResponse( - `Invalid request data: ${error.errors.map((e) => e.message).join(', ')}` - ) - } - - logger.error(`[${tracker.requestId}] Unexpected error:`, { - duration, - error: error instanceof Error ? error.message : 'Unknown error', - stack: error instanceof Error ? 
error.stack : undefined, - }) - - return createInternalServerErrorResponse( - error instanceof Error ? error.message : 'Internal server error' - ) - } + ) } diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index d08ccdd1817..b78f1ddfda1 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -34,6 +34,7 @@ import type { SpanProcessor, } from '@opentelemetry/sdk-trace-base' import { createLogger } from '@sim/logger' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { env } from './lib/core/config/env' diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) @@ -118,24 +119,33 @@ function parseOtlpHeadersEnv(raw: string): Record { * Normalize an OTLP base URL to the full traces-signal endpoint. * * The OTel HTTP exporter sends to whatever URL you give it verbatim - * — no signal-path appending. That's a footgun when the same env - * var also flows into the Go side, where the SDK *does* append - * `/v1/traces` automatically. We bridge the gap here so both halves - * of the mothership can share one endpoint value. + * — no signal-path appending. Meanwhile, the OTel spec says + * `OTEL_EXPORTER_OTLP_ENDPOINT` is a *base* URL and the SDK should + * append `/v1/traces`. We reconcile by always ensuring the final + * URL ends with `/v1/traces` unless the operator already put it + * there. * * Rules: - * - If the URL already has a non-root path, respect it (operator - * intent: "post to exactly this URL"). - * - Otherwise, append `/v1/traces`. + * - If the URL already ends with `/v1/traces`, respect it. + * - Otherwise, append `/v1/traces` (dropping any trailing slash + * on the base first). * - Malformed URLs pass through unchanged; the exporter will * surface the error at first export. 
+ * + * Examples: + * https://api.honeycomb.io → https://api.honeycomb.io/v1/traces + * https://api.honeycomb.io/v1/traces → https://api.honeycomb.io/v1/traces + * https://otlp-gateway-prod-us-east-3.grafana.net/otlp + * → …/otlp/v1/traces + * http://localhost:4318 → http://localhost:4318/v1/traces */ function normalizeOtlpTracesUrl(url: string): string { if (!url) return url try { const u = new URL(url) - if (u.pathname && u.pathname !== '/') return url - return `${url.replace(/\/$/, '')}/v1/traces` + if (u.pathname.endsWith('/v1/traces')) return url + const base = url.replace(/\/$/, '') + return `${base}/v1/traces` } catch { return url } @@ -187,7 +197,7 @@ class MothershipOriginSpanProcessor implements SpanProcessor { if (!isBusinessSpan(name)) { return } - span.setAttribute('mothership.origin', MOTHERSHIP_ORIGIN) + span.setAttribute(TraceAttr.MothershipOrigin, MOTHERSHIP_ORIGIN) if (!name.startsWith(SPAN_NAME_PREFIX)) { span.updateName(`${SPAN_NAME_PREFIX}${name}`) } @@ -365,7 +375,17 @@ async function initializeOpenTelemetry() { resourceFromAttributes({ [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, [ATTR_SERVICE_VERSION]: telemetryConfig.serviceVersion, - [ATTR_DEPLOYMENT_ENVIRONMENT]: env.NODE_ENV || 'development', + // Explicit OTel env var wins; fall back to `DEPLOYMENT_ENVIRONMENT` + // for alt spellings; finally fall back to `NODE_ENV` so local dev + // (which rarely sets the otel vars) still produces a reasonable + // label. Matches the Go side's `resourceEnvFromEnv()` so Sim and + // Go always tag the same `deployment.environment` value for the + // same deploy. 
+ [ATTR_DEPLOYMENT_ENVIRONMENT]: + process.env.OTEL_DEPLOYMENT_ENVIRONMENT || + process.env.DEPLOYMENT_ENVIRONMENT || + env.NODE_ENV || + 'development', 'service.namespace': 'mothership', 'service.instance.id': serviceInstanceId, 'mothership.origin': MOTHERSHIP_ORIGIN, diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index 1b97d061204..8a79eae8e2a 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -9,6 +9,7 @@ import { } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { and, desc, eq, inArray, isNull } from 'drizzle-orm' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { ASYNC_TOOL_STATUS, type AsyncCompletionData, @@ -36,9 +37,9 @@ async function withDbSpan( ): Promise { const span = getAsyncRunsTracer().startSpan(name, { attributes: { - 'db.system': 'postgresql', - 'db.operation': op, - 'db.sql.table': table, + [TraceAttr.DbSystem]: 'postgresql', + [TraceAttr.DbOperation]: op, + [TraceAttr.DbSqlTable]: table, ...Object.fromEntries(Object.entries(attrs).filter(([, v]) => v !== undefined)), }, }) diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index 4a2903dad05..d8aaacb7df3 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -19,6 +19,7 @@ import { import { finalizeAssistantTurn } from '@/lib/copilot/chat/terminal-state' import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { createBadRequestResponse, @@ -314,8 +315,8 @@ async function persistUserMessage(params: { const messagesAfter = Array.isArray(updated?.messages) ? 
updated.messages : undefined span.setAttributes({ - 'chat.persist.outcome': updated ? 'appended' : 'chat_not_found', - 'chat.messages_after': messagesAfter?.length ?? 0, + [TraceAttr.ChatPersistOutcome]: updated ? 'appended' : 'chat_not_found', + [TraceAttr.ChatMessagesAfter]: messagesAfter?.length ?? 0, }) if (notifyWorkspaceStatus && updated && workspaceId) { @@ -615,7 +616,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { const authenticatedUserId = session.user.id const body = ChatMessageSchema.parse(await req.json()) - const normalizedContexts = normalizeContexts(body.contexts) + const normalizedContexts = normalizeContexts(body.contexts) ?? [] userMessageId = body.userMessageId || crypto.randomUUID() otelRoot = startCopilotOtelRoot({ @@ -624,6 +625,11 @@ export async function handleUnifiedChatPost(req: NextRequest) { executionId, runId, transport: 'stream', + // Truncated prompt for the dashboard "user message" column. + // Unconditional (no PII env gate) — a preview snippet is + // cheap and widely useful; full content is gated separately + // by setInputMessages above. + userMessagePreview: body.message, }) // Emit `gen_ai.input.messages` on the root agent span for OTel // GenAI spec compliance (Honeycomb's Gen AI view keys off this). @@ -637,15 +643,24 @@ export async function handleUnifiedChatPost(req: NextRequest) { // wrapper those spans became orphan roots and each showed up as a // separate trace in Jaeger. return await otelContextApi.with(otelRoot.context, async () => { - const branch = await resolveBranch({ - authenticatedUserId, - workflowId: body.workflowId, - workflowName: body.workflowName, - workspaceId: body.workspaceId, - model: body.model, - mode: body.mode, - provider: body.provider, - }) + const branch = await withCopilotSpan( + TraceSpan.CopilotChatResolveBranch, + { + 'branch.workflow_id': body.workflowId ?? '', + 'branch.workspace_id': body.workspaceId ?? 
'', + }, + () => + resolveBranch({ + authenticatedUserId, + workflowId: body.workflowId, + workflowName: body.workflowName, + workspaceId: body.workspaceId, + model: body.model, + mode: body.mode, + provider: body.provider, + }), + otelRoot!.context + ) if (branch instanceof NextResponse) { return branch } @@ -656,14 +671,23 @@ export async function handleUnifiedChatPost(req: NextRequest) { actualChatId = body.chatId if (body.chatId || body.createNewChat) { - const chatResult = await resolveOrCreateChat({ - chatId: body.chatId, - userId: authenticatedUserId, - ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), - workspaceId: branch.workspaceId, - model: branch.titleModel, - type: branch.kind === 'workflow' ? 'copilot' : 'mothership', - }) + const chatResult = await withCopilotSpan( + TraceSpan.CopilotChatResolveOrCreateChat, + { + 'chat.preexisting': !!body.chatId, + 'chat.create_new': !!body.createNewChat, + }, + () => + resolveOrCreateChat({ + chatId: body.chatId, + userId: authenticatedUserId, + ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), + workspaceId: branch.workspaceId, + model: branch.titleModel, + type: branch.kind === 'workflow' ? 
'copilot' : 'mothership', + }), + otelRoot!.context + ) currentChat = chatResult.chat actualChatId = chatResult.chatId || body.chatId chatIsNew = chatResult.isNew @@ -687,8 +711,11 @@ export async function handleUnifiedChatPost(req: NextRequest) { ) } + let pendingStreamWaitMs = 0 if (actualChatId) { + const lockStart = Date.now() chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId) + pendingStreamWaitMs = Date.now() - lockStart if (!chatStreamLockAcquired) { const activeStreamId = await getPendingChatStreamId(actualChatId) return NextResponse.json( @@ -701,6 +728,25 @@ export async function handleUnifiedChatPost(req: NextRequest) { } } + // Stamp request-shape metadata on the root `gen_ai.agent.execute` + // span now that `branch`, attachment counts, and the pending-stream + // wait are all known. This turns dashboard slicing by + // `copilot.surface` / `copilot.mode` / `copilot.interrupted_prior_stream` + // into a simple TraceQL filter. + otelRoot!.setRequestShape({ + branchKind: branch.kind, + mode: body.mode, + model: body.model, + provider: body.provider, + createNewChat: body.createNewChat, + prefetch: body.prefetch, + fileAttachmentsCount: body.fileAttachments?.length ?? 0, + resourceAttachmentsCount: body.resourceAttachments?.length ?? 0, + contextsCount: normalizedContexts.length, + commandsCount: body.commands?.length ?? 0, + pendingStreamWaitMs, + }) + const workspaceId = branch.workspaceId const userPermissionPromise = workspaceId ? getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch((error) => { @@ -711,19 +757,38 @@ export async function handleUnifiedChatPost(req: NextRequest) { return null }) : Promise.resolve(null) + // Wrap the pre-LLM prep work in spans so the trace waterfall shows + // where time is going between "request received" and "llm.stream + // opens". Previously these ran bare under the root and inflated the + // apparent "gap" before the model call. 
Each promise is its own + // span; they run concurrently under Promise.all below. const workspaceContextPromise = branch.kind === 'workspace' - ? generateWorkspaceContext(branch.workspaceId, authenticatedUserId) + ? withCopilotSpan( + TraceSpan.CopilotChatBuildWorkspaceContext, + { 'workspace.id': branch.workspaceId }, + () => generateWorkspaceContext(branch.workspaceId, authenticatedUserId), + otelRoot!.context + ) : Promise.resolve(undefined) - const agentContextsPromise = resolveAgentContexts({ - contexts: normalizedContexts, - resourceAttachments: body.resourceAttachments, - userId: authenticatedUserId, - message: body.message, - workspaceId, - chatId: actualChatId, - requestId: tracker.requestId, - }) + const agentContextsPromise = withCopilotSpan( + TraceSpan.CopilotChatResolveAgentContexts, + { + 'contexts.count': normalizedContexts.length, + 'attachments.count': body.resourceAttachments?.length ?? 0, + }, + () => + resolveAgentContexts({ + contexts: normalizedContexts, + resourceAttachments: body.resourceAttachments, + userId: authenticatedUserId, + message: body.message, + workspaceId, + chatId: actualChatId, + requestId: tracker.requestId, + }), + otelRoot!.context + ) const persistedMessagesPromise = persistUserMessage({ chatId: actualChatId, userMessageId, @@ -734,12 +799,18 @@ export async function handleUnifiedChatPost(req: NextRequest) { notifyWorkspaceStatus: branch.notifyWorkspaceStatus, parentOtelContext: otelRoot!.context, }) - const executionContextPromise = branch.buildExecutionContext({ - userId: authenticatedUserId, - chatId: actualChatId, - userTimezone: body.userTimezone, - messageId: userMessageId, - }) + const executionContextPromise = withCopilotSpan( + TraceSpan.CopilotChatBuildExecutionContext, + { 'branch.kind': branch.kind }, + () => + branch.buildExecutionContext({ + userId: authenticatedUserId, + chatId: actualChatId, + userTimezone: body.userTimezone, + messageId: userMessageId, + }), + otelRoot!.context + ) const [agentContexts, 
userPermission, workspaceContext, persistedMessages, executionContext] = await Promise.all([ @@ -757,43 +828,57 @@ export async function handleUnifiedChatPost(req: NextRequest) { }) } - const requestPayload = - branch.kind === 'workflow' - ? await branch.buildPayload({ - message: body.message, - userId: authenticatedUserId, - userMessageId, - chatId: actualChatId, - contexts: agentContexts, - fileAttachments: body.fileAttachments, - userPermission: userPermission ?? undefined, - userTimezone: body.userTimezone, - workflowId: branch.workflowId, - workflowName: branch.workflowName, - workspaceId: branch.workspaceId, - mode: branch.mode, - provider: branch.provider, - commands: body.commands, - prefetch: body.prefetch, - implicitFeedback: body.implicitFeedback, - }) - : await branch.buildPayload({ - message: body.message, - userId: authenticatedUserId, - userMessageId, - chatId: actualChatId, - contexts: agentContexts, - fileAttachments: body.fileAttachments, - userPermission: userPermission ?? undefined, - userTimezone: body.userTimezone, - workspaceContext, - }) + // buildPayload is the last synchronous step before the outbound + // Sim → Go HTTP call. It runs per-tool schema generation (subscription + // lookup + registry iteration, cached 30s) and file upload tracking + // per attachment. Wrapping it so we can see how much of the + // "before llm.stream" gap lives here vs elsewhere. + const requestPayload = await withCopilotSpan( + TraceSpan.CopilotChatBuildPayload, + { + 'branch.kind': branch.kind, + 'attachments.count': body.fileAttachments?.length ?? 0, + 'contexts.count': normalizedContexts.length, + }, + () => + branch.kind === 'workflow' + ? branch.buildPayload({ + message: body.message, + userId: authenticatedUserId, + userMessageId, + chatId: actualChatId, + contexts: agentContexts, + fileAttachments: body.fileAttachments, + userPermission: userPermission ?? 
undefined, + userTimezone: body.userTimezone, + workflowId: branch.workflowId, + workflowName: branch.workflowName, + workspaceId: branch.workspaceId, + mode: branch.mode, + provider: branch.provider, + commands: body.commands, + prefetch: body.prefetch, + implicitFeedback: body.implicitFeedback, + }) + : branch.buildPayload({ + message: body.message, + userId: authenticatedUserId, + userMessageId, + chatId: actualChatId, + contexts: agentContexts, + fileAttachments: body.fileAttachments, + userPermission: userPermission ?? undefined, + userTimezone: body.userTimezone, + workspaceContext, + }), + otelRoot!.context + ) if (actualChatId) { - otelRoot!.span.setAttribute('chat.id', actualChatId) + otelRoot!.span.setAttribute(TraceAttr.ChatId, actualChatId) } if (workspaceId) { - otelRoot!.span.setAttribute('workspace.id', workspaceId) + otelRoot!.span.setAttribute(TraceAttr.WorkspaceId, workspaceId) } const stream = createSSEStream({ diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index 221a1885557..6bcb987192b 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -2,6 +2,7 @@ import { db } from '@sim/db' import { copilotChats } from '@sim/db/schema' import { and, eq, sql } from 'drizzle-orm' import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' @@ -38,7 +39,7 @@ export async function finalizeAssistantTurn({ .limit(1) const messages: Record[] = Array.isArray(row?.messages) ? 
row.messages : [] - span.setAttribute('chat.existing_message_count', messages.length) + span.setAttribute(TraceAttr.ChatExistingMessageCount, messages.length) const userIdx = messages.findIndex((message) => message.id === userMessageId) const alreadyHasResponse = userIdx >= 0 && @@ -64,13 +65,13 @@ export async function finalizeAssistantTurn({ messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`, }) .where(updateWhere) - span.setAttribute('chat.finalize.outcome', 'appended_assistant') + span.setAttribute(TraceAttr.ChatFinalizeOutcome, 'appended_assistant') return } await db.update(copilotChats).set(baseUpdate).where(updateWhere) span.setAttribute( - 'chat.finalize.outcome', + TraceAttr.ChatFinalizeOutcome, assistantMessage ? alreadyHasResponse ? 'assistant_already_persisted' diff --git a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts new file mode 100644 index 00000000000..1f1e9e84a6a --- /dev/null +++ b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts @@ -0,0 +1,968 @@ +// AUTO-GENERATED FILE. DO NOT EDIT. +// +// Source: copilot/copilot/contracts/trace-attributes-v1.schema.json +// Regenerate with: bun run trace-attributes-contract:generate +// +// Canonical custom mothership OTel span attribute keys. Call sites +// should reference `TraceAttr.` (e.g. +// `TraceAttr.ChatId`, `TraceAttr.ToolCallId`) rather than raw +// string literals, so the Go-side contract is the single source of +// truth and typos become compile errors. +// +// For OTel semantic-convention keys (`http.*`, `db.*`, +// `gen_ai.*`, `net.*`, `messaging.*`, `service.*`, +// `deployment.environment`), import from +// `@opentelemetry/semantic-conventions` directly — those are owned +// by the upstream OTel spec, not by this contract. 
+ +export const TraceAttr = { + AbortBackend: 'abort.backend', + AbortFound: 'abort.found', + AbortRedisResult: 'abort.redis_result', + AnalyticsAborted: 'analytics.aborted', + AnalyticsBilledTotalCost: 'analytics.billed_total_cost', + AnalyticsCacheReadTokens: 'analytics.cache_read_tokens', + AnalyticsCacheWriteTokens: 'analytics.cache_write_tokens', + AnalyticsCustomerType: 'analytics.customer_type', + AnalyticsDurationMs: 'analytics.duration_ms', + AnalyticsError: 'analytics.error', + AnalyticsInputTokens: 'analytics.input_tokens', + AnalyticsModel: 'analytics.model', + AnalyticsOutputTokens: 'analytics.output_tokens', + AnalyticsProvider: 'analytics.provider', + AnalyticsSource: 'analytics.source', + AnalyticsToolCallCount: 'analytics.tool_call_count', + ApiKeyId: 'api_key.id', + ApiKeyName: 'api_key.name', + AuthIncomingInternal: 'auth.incoming_internal', + AuthKeyMatch: 'auth.key.match', + AuthKeyPreview: 'auth.key.preview', + AuthKeySource: 'auth.key.source', + AuthKeyType: 'auth.key.type', + AuthProvider: 'auth.provider', + AuthValidateStatusCode: 'auth.validate.status_code', + AwsRegion: 'aws.region', + BedrockErrorCode: 'bedrock.error_code', + BedrockModelId: 'bedrock.model_id', + BedrockRequestBodyBytesRetry: 'bedrock.request.body_bytes_retry', + BillingAttempts: 'billing.attempts', + BillingChangeType: 'billing.change_type', + BillingCostInputUsd: 'billing.cost.input_usd', + BillingCostOutputUsd: 'billing.cost.output_usd', + BillingCostTotalUsd: 'billing.cost.total_usd', + BillingCostUsd: 'billing.cost_usd', + BillingCustomerType: 'billing.customer_type', + BillingDuplicate: 'billing.duplicate', + BillingDurationMs: 'billing.duration_ms', + BillingHasIdempotencyKey: 'billing.has_idempotency_key', + BillingIdempotencyKey: 'billing.idempotency_key', + BillingInterval: 'billing.interval', + BillingIsMcp: 'billing.is_mcp', + BillingLlmCost: 'billing.llm_cost', + BillingNewPlan: 'billing.new_plan', + BillingOutcome: 'billing.outcome', + BillingPlan: 
'billing.plan', + BillingPreviousPlan: 'billing.previous_plan', + BillingServiceCharges: 'billing.service_charges', + BillingSource: 'billing.source', + BillingTotalCost: 'billing.total_cost', + BillingUsageCurrent: 'billing.usage.current', + BillingUsageExceeded: 'billing.usage.exceeded', + BillingUsageLimit: 'billing.usage.limit', + BlockId: 'block.id', + BlockName: 'block.name', + BlockType: 'block.type', + ChatActiveMessagesBytes: 'chat.active_messages_bytes', + ChatActiveMessagesCount: 'chat.active_messages_count', + ChatAppendBytes: 'chat.append_bytes', + ChatAppendCount: 'chat.append_count', + ChatArtifactKeys: 'chat.artifact_keys', + ChatArtifactsBytes: 'chat.artifacts_bytes', + ChatAuthType: 'chat.auth_type', + ChatContextCount: 'chat.context_count', + ChatContextUsage: 'chat.context_usage', + ChatContinuationMessagesBefore: 'chat.continuation.messages_before', + ChatContinuationToolResultBytes: 'chat.continuation.tool_result_bytes', + ChatContinuationToolResultFailure: 'chat.continuation.tool_result_failure', + ChatContinuationToolResultSuccess: 'chat.continuation.tool_result_success', + ChatContinuationToolResults: 'chat.continuation.tool_results', + ChatContinuationTotalToolCalls: 'chat.continuation.total_tool_calls', + ChatExistingMessageCount: 'chat.existing_message_count', + ChatFileAttachmentCount: 'chat.file_attachment_count', + ChatFinalizeOutcome: 'chat.finalize.outcome', + ChatFound: 'chat.found', + ChatHasAssistantMessage: 'chat.has_assistant_message', + ChatHasOutputConfigs: 'chat.has_output_configs', + ChatId: 'chat.id', + ChatMessageBytes: 'chat.message_bytes', + ChatMessagesAfter: 'chat.messages_after', + ChatMessagesBytes: 'chat.messages_bytes', + ChatMessagesCount: 'chat.messages_count', + ChatPersistOutcome: 'chat.persist.outcome', + ChatPreexisting: 'chat.preexisting', + ChatRollbackIndex: 'chat.rollback_index', + ChatTokensUsed: 'chat.tokens_used', + ChatType: 'chat.type', + ChatUserMessageId: 'chat.user_message_id', + CheckpointAge: 
'checkpoint.age', + CheckpointAttemptsBytes: 'checkpoint.attempts_bytes', + CheckpointBytesAssistantToolUse: 'checkpoint.bytes.assistant_tool_use', + CheckpointBytesCurrentMessages: 'checkpoint.bytes.current_messages', + CheckpointBytesImmediateResults: 'checkpoint.bytes.immediate_results', + CheckpointBytesPendingToolCalls: 'checkpoint.bytes.pending_tool_calls', + CheckpointBytesProviderRequest: 'checkpoint.bytes.provider_request', + CheckpointBytesRequestContext: 'checkpoint.bytes.request_context', + CheckpointBytesToolUsage: 'checkpoint.bytes.tool_usage', + CheckpointCachedCredentialsBytes: 'checkpoint.cached_credentials_bytes', + CheckpointClaimed: 'checkpoint.claimed', + CheckpointClaimedNow: 'checkpoint.claimed_now', + CheckpointCompletedBytes: 'checkpoint.completed_bytes', + CheckpointCompletedSteps: 'checkpoint.completed_steps', + CheckpointCurrentMessages: 'checkpoint.current_messages', + CheckpointDecisionsBytes: 'checkpoint.decisions_bytes', + CheckpointFound: 'checkpoint.found', + CheckpointFrames: 'checkpoint.frames', + CheckpointId: 'checkpoint.id', + CheckpointImmediateResults: 'checkpoint.immediate_results', + CheckpointMessageId: 'checkpoint.message_id', + CheckpointPendingBytes: 'checkpoint.pending_bytes', + CheckpointPendingSteps: 'checkpoint.pending_steps', + CheckpointPendingToolCount: 'checkpoint.pending_tool_count', + CheckpointRows: 'checkpoint.rows', + CheckpointTaskId: 'checkpoint.task_id', + CheckpointTotalToolCalls: 'checkpoint.total_tool_calls', + CheckpointWorkflowSnapshotBytes: 'checkpoint.workflow_snapshot_bytes', + ClientVersion: 'client.version', + ConditionId: 'condition.id', + ConditionName: 'condition.name', + ConditionResult: 'condition.result', + ContextReduceBudgetChars: 'context.reduce.budget_chars', + ContextReduceCaller: 'context.reduce.caller', + ContextReduceDidReduce: 'context.reduce.did_reduce', + ContextReduceInputChars: 'context.reduce.input_chars', + ContextReduceInputMessages: 'context.reduce.input_messages', + 
ContextReduceOutcome: 'context.reduce.outcome', + ContextReduceOutputChars: 'context.reduce.output_chars', + ContextReduceOutputMessages: 'context.reduce.output_messages', + ContextReduced: 'context.reduced', + ContextSummarizeInputChars: 'context.summarize.input_chars', + ContextSummarizeOutputChars: 'context.summarize.output_chars', + CopilotAbortGoMarkerOk: 'copilot.abort.go_marker_ok', + CopilotAbortLocalAborted: 'copilot.abort.local_aborted', + CopilotAbortOutcome: 'copilot.abort.outcome', + CopilotAsyncToolClaimedBy: 'copilot.async_tool.claimed_by', + CopilotAsyncToolHasError: 'copilot.async_tool.has_error', + CopilotAsyncToolIdsCount: 'copilot.async_tool.ids_count', + CopilotAsyncToolStatus: 'copilot.async_tool.status', + CopilotAsyncToolWorkerId: 'copilot.async_tool.worker_id', + CopilotBranchKind: 'copilot.branch.kind', + CopilotChatIsNew: 'copilot.chat.is_new', + CopilotCommandsCount: 'copilot.commands.count', + CopilotConfirmOutcome: 'copilot.confirm.outcome', + CopilotContextsCount: 'copilot.contexts.count', + CopilotFileAttachmentsCount: 'copilot.file_attachments.count', + CopilotFinalizeOutcome: 'copilot.finalize.outcome', + CopilotInterruptedPriorStream: 'copilot.interrupted_prior_stream', + CopilotLeg: 'copilot.leg', + CopilotMode: 'copilot.mode', + CopilotOperation: 'copilot.operation', + CopilotOutputFileBytes: 'copilot.output_file.bytes', + CopilotOutputFileFormat: 'copilot.output_file.format', + CopilotOutputFileId: 'copilot.output_file.id', + CopilotOutputFileName: 'copilot.output_file.name', + CopilotOutputFileOutcome: 'copilot.output_file.outcome', + CopilotPendingStreamWaitMs: 'copilot.pending_stream.wait_ms', + CopilotPrefetch: 'copilot.prefetch', + CopilotPublisherClientDisconnected: 'copilot.publisher.client_disconnected', + CopilotPublisherSawComplete: 'copilot.publisher.saw_complete', + CopilotRecoveryLatestSeq: 'copilot.recovery.latest_seq', + CopilotRecoveryOldestSeq: 'copilot.recovery.oldest_seq', + CopilotRecoveryOutcome: 
'copilot.recovery.outcome', + CopilotRecoveryRequestedAfterSeq: 'copilot.recovery.requested_after_seq', + CopilotRequestOutcome: 'copilot.request.outcome', + CopilotResourceAttachmentsCount: 'copilot.resource_attachments.count', + CopilotResourcesAborted: 'copilot.resources.aborted', + CopilotResourcesOp: 'copilot.resources.op', + CopilotResourcesRemovedCount: 'copilot.resources.removed_count', + CopilotResourcesUpsertedCount: 'copilot.resources.upserted_count', + CopilotResultContentBlocks: 'copilot.result.content_blocks', + CopilotResultContentLength: 'copilot.result.content_length', + CopilotResultToolCalls: 'copilot.result.tool_calls', + CopilotResumeAfterCursor: 'copilot.resume.after_cursor', + CopilotResumeDurationMs: 'copilot.resume.duration_ms', + CopilotResumeEventCount: 'copilot.resume.event_count', + CopilotResumeOutcome: 'copilot.resume.outcome', + CopilotResumePollIterations: 'copilot.resume.poll_iterations', + CopilotResumePreviewSessionCount: 'copilot.resume.preview_session_count', + CopilotRoute: 'copilot.route', + CopilotRunAgent: 'copilot.run.agent', + CopilotRunHasCompletedAt: 'copilot.run.has_completed_at', + CopilotRunHasError: 'copilot.run.has_error', + CopilotRunModel: 'copilot.run.model', + CopilotRunParentId: 'copilot.run.parent_id', + CopilotRunProvider: 'copilot.run.provider', + CopilotRunStatus: 'copilot.run.status', + CopilotStopAppendedAssistant: 'copilot.stop.appended_assistant', + CopilotStopBlocksCount: 'copilot.stop.blocks_count', + CopilotStopContentLength: 'copilot.stop.content_length', + CopilotStopOutcome: 'copilot.stop.outcome', + CopilotStream: 'copilot.stream', + CopilotSurface: 'copilot.surface', + CopilotTableId: 'copilot.table.id', + CopilotTableOutcome: 'copilot.table.outcome', + CopilotTableRowCount: 'copilot.table.row_count', + CopilotTableSourceContentBytes: 'copilot.table.source.content_bytes', + CopilotTableSourceFormat: 'copilot.table.source.format', + CopilotTableSourcePath: 'copilot.table.source.path', + 
CopilotTraceSpanCount: 'copilot.trace.span_count', + CopilotTransport: 'copilot.transport', + CopilotUserMessagePreview: 'copilot.user.message_preview', + CopilotValidateOutcome: 'copilot.validate.outcome', + CopilotVfsFileExtension: 'copilot.vfs.file.extension', + CopilotVfsFileMediaType: 'copilot.vfs.file.media_type', + CopilotVfsFileName: 'copilot.vfs.file.name', + CopilotVfsFileSizeBytes: 'copilot.vfs.file.size_bytes', + CopilotVfsHasAlpha: 'copilot.vfs.has_alpha', + CopilotVfsInputBytes: 'copilot.vfs.input.bytes', + CopilotVfsInputHeight: 'copilot.vfs.input.height', + CopilotVfsInputMediaTypeClaimed: 'copilot.vfs.input.media_type_claimed', + CopilotVfsInputMediaTypeDetected: 'copilot.vfs.input.media_type_detected', + CopilotVfsInputWidth: 'copilot.vfs.input.width', + CopilotVfsMetadataFailed: 'copilot.vfs.metadata.failed', + CopilotVfsOutcome: 'copilot.vfs.outcome', + CopilotVfsOutputBytes: 'copilot.vfs.output.bytes', + CopilotVfsOutputMediaType: 'copilot.vfs.output.media_type', + CopilotVfsReadImageResized: 'copilot.vfs.read.image.resized', + CopilotVfsReadOutcome: 'copilot.vfs.read.outcome', + CopilotVfsReadOutputBytes: 'copilot.vfs.read.output.bytes', + CopilotVfsReadOutputLines: 'copilot.vfs.read.output.lines', + CopilotVfsReadOutputMediaType: 'copilot.vfs.read.output.media_type', + CopilotVfsReadPath: 'copilot.vfs.read.path', + CopilotVfsResizeAttempts: 'copilot.vfs.resize.attempts', + CopilotVfsResizeChosenDimension: 'copilot.vfs.resize.chosen_dimension', + CopilotVfsResizeChosenQuality: 'copilot.vfs.resize.chosen_quality', + CopilotVfsResized: 'copilot.vfs.resized', + CopilotVfsSharpLoadFailed: 'copilot.vfs.sharp.load_failed', + CostDefaultCost: 'cost.default_cost', + CredentialSetId: 'credential_set.id', + CredentialSetName: 'credential_set.name', + DbOperation: 'db.operation', + DbSqlTable: 'db.sql.table', + DbSystem: 'db.system', + DeploymentEnvironment: 'deployment.environment', + DeploymentVersion: 'deployment.version', + DocumentFileSize: 
'document.file_size', + DocumentMimeType: 'document.mime_type', + DocumentsCount: 'documents.count', + DocumentsUploadType: 'documents.upload_type', + Error: 'error', + ErrorCode: 'error.code', + ErrorInternal: 'error.internal', + ErrorType: 'error.type', + EventName: 'event.name', + EventTimestamp: 'event.timestamp', + ExecutionBlocksExecuted: 'execution.blocks_executed', + ExecutionDurationMs: 'execution.duration_ms', + ExecutionErrorMessage: 'execution.error_message', + ExecutionHasErrors: 'execution.has_errors', + ExecutionStatus: 'execution.status', + ExecutionTotalCost: 'execution.total_cost', + ExecutionTrigger: 'execution.trigger', + FunctionExecutionTimeMs: 'function.execution_time_ms', + FunctionId: 'function.id', + FunctionName: 'function.name', + GenAiAgentId: 'gen_ai.agent.id', + GenAiAgentName: 'gen_ai.agent.name', + GenAiCostInput: 'gen_ai.cost.input', + GenAiCostOutput: 'gen_ai.cost.output', + GenAiCostTotal: 'gen_ai.cost.total', + GenAiInputMessages: 'gen_ai.input.messages', + GenAiOperationName: 'gen_ai.operation.name', + GenAiOutputMessages: 'gen_ai.output.messages', + GenAiRequestAssistantMessages: 'gen_ai.request.assistant_messages', + GenAiRequestContentBlocks: 'gen_ai.request.content_blocks', + GenAiRequestHasCacheControl: 'gen_ai.request.has_cache_control', + GenAiRequestImageBlocks: 'gen_ai.request.image_blocks', + GenAiRequestImageDataBytes: 'gen_ai.request.image_data_bytes', + GenAiRequestMaxMessageBlocks: 'gen_ai.request.max_message_blocks', + GenAiRequestMessagesCount: 'gen_ai.request.messages.count', + GenAiRequestModel: 'gen_ai.request.model', + GenAiRequestSystemChars: 'gen_ai.request.system_chars', + GenAiRequestTextBlocks: 'gen_ai.request.text_blocks', + GenAiRequestToolResultBlocks: 'gen_ai.request.tool_result_blocks', + GenAiRequestToolUseBlocks: 'gen_ai.request.tool_use_blocks', + GenAiRequestToolsCount: 'gen_ai.request.tools.count', + GenAiRequestUserMessages: 'gen_ai.request.user_messages', + GenAiSystem: 'gen_ai.system', + 
GenAiToolName: 'gen_ai.tool.name', + GenAiUsageCacheCreationTokens: 'gen_ai.usage.cache_creation_tokens', + GenAiUsageCacheReadTokens: 'gen_ai.usage.cache_read_tokens', + GenAiUsageInputTokens: 'gen_ai.usage.input_tokens', + GenAiUsageOutputTokens: 'gen_ai.usage.output_tokens', + GenAiUsageTotalTokens: 'gen_ai.usage.total_tokens', + GenAiWorkflowExecutionId: 'gen_ai.workflow.execution_id', + GenAiWorkflowId: 'gen_ai.workflow.id', + GenAiWorkflowName: 'gen_ai.workflow.name', + HostedKeyEnvVar: 'hosted_key.env_var', + HttpHost: 'http.host', + HttpMethod: 'http.method', + HttpPath: 'http.path', + HttpRemoteAddr: 'http.remote_addr', + HttpRequestContentLength: 'http.request.content_length', + HttpResponseBodyBytes: 'http.response.body_bytes', + HttpResponseContentLength: 'http.response.content_length', + HttpResponseHeadersMs: 'http.response.headers_ms', + HttpResponseTotalMs: 'http.response.total_ms', + HttpServerDurationMs: 'http.server.duration_ms', + HttpStatusCode: 'http.status_code', + HttpTarget: 'http.target', + HttpUrl: 'http.url', + HttpUserAgent: 'http.user_agent', + InvitationRole: 'invitation.role', + KnowledgeBaseId: 'knowledge_base.id', + KnowledgeBaseName: 'knowledge_base.name', + LlmErrorStage: 'llm.error_stage', + LlmRequestBodyBytes: 'llm.request.body_bytes', + LlmStreamBytes: 'llm.stream.bytes', + LlmStreamChunks: 'llm.stream.chunks', + LlmStreamFirstChunkBytes: 'llm.stream.first_chunk_bytes', + LlmStreamFirstChunkMs: 'llm.stream.first_chunk_ms', + LlmStreamOpenMs: 'llm.stream.open_ms', + LlmStreamTotalMs: 'llm.stream.total_ms', + LockAcquired: 'lock.acquired', + LockBackend: 'lock.backend', + LockTimedOut: 'lock.timed_out', + LockTimeoutMs: 'lock.timeout_ms', + LoopId: 'loop.id', + LoopIterations: 'loop.iterations', + LoopName: 'loop.name', + McpExecutionStatus: 'mcp.execution_status', + McpServerId: 'mcp.server_id', + McpServerName: 'mcp.server_name', + McpToolName: 'mcp.tool_name', + McpTransport: 'mcp.transport', + MemberRole: 'member.role', + 
MemoryContentBytes: 'memory.content_bytes', + MemoryFound: 'memory.found', + MemoryPath: 'memory.path', + MemoryRowCount: 'memory.row_count', + MessageId: 'message.id', + MessagingDestinationName: 'messaging.destination.name', + MessagingSystem: 'messaging.system', + ModelDurationMs: 'model.duration_ms', + ModelId: 'model.id', + ModelName: 'model.name', + MothershipOrigin: 'mothership.origin', + NetPeerName: 'net.peer.name', + OauthProvider: 'oauth.provider', + ParallelBranches: 'parallel.branches', + ParallelId: 'parallel.id', + ParallelName: 'parallel.name', + PrefsToolCount: 'prefs.tool_count', + ProcessingChunkSize: 'processing.chunk_size', + ProcessingRecipe: 'processing.recipe', + ProviderId: 'provider.id', + RateLimitAttempt: 'rate_limit.attempt', + RateLimitCount: 'rate_limit.count', + RateLimitDelayMs: 'rate_limit.delay_ms', + RateLimitLimit: 'rate_limit.limit', + RateLimitMaxRetries: 'rate_limit.max_retries', + RateLimitOutcome: 'rate_limit.outcome', + RateLimitRetryAfterMs: 'rate_limit.retry_after_ms', + RequestGoTraceId: 'request.go_trace_id', + RequestId: 'request.id', + RequiredVersion: 'required.version', + ResumeRequestBodyBytes: 'resume.request.body_bytes', + ResumeResultsCount: 'resume.results.count', + ResumeResultsDataBytes: 'resume.results.data_bytes', + ResumeResultsFailureCount: 'resume.results.failure_count', + ResumeResultsSuccessCount: 'resume.results.success_count', + RouterBackendName: 'router.backend_name', + RouterBedrockEnabled: 'router.bedrock_enabled', + RouterBedrockSupportedModel: 'router.bedrock_supported_model', + RouterId: 'router.id', + RouterName: 'router.name', + RouterSelectedBackend: 'router.selected_backend', + RouterSelectedPath: 'router.selected_path', + RunId: 'run.id', + SearchResultsCount: 'search.results_count', + ServiceInstanceId: 'service.instance.id', + ServiceName: 'service.name', + ServiceNamespace: 'service.namespace', + ServiceVersion: 'service.version', + SettleCompleted: 'settle.completed', + 
SettleTimeoutMs: 'settle.timeout_ms', + SettleWaitMs: 'settle.wait_ms', + SimOperation: 'sim.operation', + SimRequestId: 'sim.request_id', + SpanDurationMs: 'span.duration_ms', + SpanStatus: 'span.status', + SpanType: 'span.type', + StreamId: 'stream.id', + SubagentId: 'subagent.id', + SubagentOutcomeContentBytes: 'subagent.outcome.content_bytes', + SubagentOutcomeError: 'subagent.outcome.error', + SubagentOutcomeStructuredType: 'subagent.outcome.structured_type', + SubagentOutcomeSuccess: 'subagent.outcome.success', + SubagentOutcomeToolCallCount: 'subagent.outcome.tool_call_count', + TaskAge: 'task.age', + TaskDecisionCount: 'task.decision_count', + TaskErrorCount: 'task.error_count', + TaskFound: 'task.found', + TaskId: 'task.id', + TaskListLimit: 'task.list_limit', + TaskRows: 'task.rows', + TaskStatus: 'task.status', + TaskStepCount: 'task.step_count', + TelemetrySdkLanguage: 'telemetry.sdk.language', + TelemetrySdkName: 'telemetry.sdk.name', + TelemetrySdkVersion: 'telemetry.sdk.version', + TemplateId: 'template.id', + TemplateName: 'template.name', + ThrottleReason: 'throttle.reason', + ToolArgsBytes: 'tool.args.bytes', + ToolArgsCount: 'tool.args.count', + ToolArgsPreview: 'tool.args.preview', + ToolAsyncWaiterPollCount: 'tool.async_waiter.poll_count', + ToolAsyncWaiterPubsubDeliveries: 'tool.async_waiter.pubsub_deliveries', + ToolAsyncWaiterResolution: 'tool.async_waiter.resolution', + ToolCallId: 'tool.call_id', + ToolClientExecutable: 'tool.client_executable', + ToolCompletionReceived: 'tool.completion.received', + ToolConfirmationStatus: 'tool.confirmation.status', + ToolDurationMs: 'tool.duration_ms', + ToolErrorKind: 'tool.error_kind', + ToolExecutor: 'tool.executor', + ToolExternalService: 'tool.external.service', + ToolId: 'tool.id', + ToolName: 'tool.name', + ToolOutcome: 'tool.outcome', + ToolOutcomeMessage: 'tool.outcome.message', + ToolParentSpan: 'tool.parent_span', + ToolPayloadBytes: 'tool.payload.bytes', + ToolResultArtifact: 
'tool.result.artifact', + ToolResultBytes: 'tool.result.bytes', + ToolResultSuccess: 'tool.result.success', + ToolScheduled: 'tool.scheduled', + ToolStatus: 'tool.status', + ToolStatusCode: 'tool.status_code', + ToolStoreStatus: 'tool.store_status', + ToolSync: 'tool.sync', + ToolTimeoutMs: 'tool.timeout_ms', + TraceAborted: 'trace.aborted', + TraceBilledTotalCost: 'trace.billed_total_cost', + TraceCacheReadTokens: 'trace.cache_read_tokens', + TraceCacheWriteTokens: 'trace.cache_write_tokens', + TraceDurationMs: 'trace.duration_ms', + TraceError: 'trace.error', + TraceGoId: 'trace.go_id', + TraceInputTokens: 'trace.input_tokens', + TraceModel: 'trace.model', + TraceOutcome: 'trace.outcome', + TraceOutputTokens: 'trace.output_tokens', + TraceProvider: 'trace.provider', + TraceRawTotalCost: 'trace.raw_total_cost', + TraceSpanCount: 'trace.span_count', + TraceToolCallCount: 'trace.tool_call_count', + UserAuthMethod: 'user.auth_method', + UserAuthProvider: 'user.auth_provider', + UserId: 'user.id', + WebhookId: 'webhook.id', + WebhookProvider: 'webhook.provider', + WebhookTriggerSuccess: 'webhook.trigger_success', + WorkflowBlockTypes: 'workflow.block_types', + WorkflowBlocksCount: 'workflow.blocks_count', + WorkflowCreatedId: 'workflow.created_id', + WorkflowDurationMs: 'workflow.duration_ms', + WorkflowEdgesCount: 'workflow.edges_count', + WorkflowExecutionId: 'workflow.execution_id', + WorkflowHasFolder: 'workflow.has_folder', + WorkflowHasWorkspace: 'workflow.has_workspace', + WorkflowId: 'workflow.id', + WorkflowLoopsCount: 'workflow.loops_count', + WorkflowName: 'workflow.name', + WorkflowNewId: 'workflow.new_id', + WorkflowParallelsCount: 'workflow.parallels_count', + WorkflowSourceId: 'workflow.source_id', + WorkflowTrigger: 'workflow.trigger', + WorkspaceId: 'workspace.id', + WorkspaceName: 'workspace.name', +} as const + +export type TraceAttrKey = keyof typeof TraceAttr +export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey] + +/** Readonly sorted 
list of every canonical custom attribute key. */ +export const TraceAttrValues: readonly TraceAttrValue[] = [ + 'abort.backend', + 'abort.found', + 'abort.redis_result', + 'analytics.aborted', + 'analytics.billed_total_cost', + 'analytics.cache_read_tokens', + 'analytics.cache_write_tokens', + 'analytics.customer_type', + 'analytics.duration_ms', + 'analytics.error', + 'analytics.input_tokens', + 'analytics.model', + 'analytics.output_tokens', + 'analytics.provider', + 'analytics.source', + 'analytics.tool_call_count', + 'api_key.id', + 'api_key.name', + 'auth.incoming_internal', + 'auth.key.match', + 'auth.key.preview', + 'auth.key.source', + 'auth.key.type', + 'auth.provider', + 'auth.validate.status_code', + 'aws.region', + 'bedrock.error_code', + 'bedrock.model_id', + 'bedrock.request.body_bytes_retry', + 'billing.attempts', + 'billing.change_type', + 'billing.cost.input_usd', + 'billing.cost.output_usd', + 'billing.cost.total_usd', + 'billing.cost_usd', + 'billing.customer_type', + 'billing.duplicate', + 'billing.duration_ms', + 'billing.has_idempotency_key', + 'billing.idempotency_key', + 'billing.interval', + 'billing.is_mcp', + 'billing.llm_cost', + 'billing.new_plan', + 'billing.outcome', + 'billing.plan', + 'billing.previous_plan', + 'billing.service_charges', + 'billing.source', + 'billing.total_cost', + 'billing.usage.current', + 'billing.usage.exceeded', + 'billing.usage.limit', + 'block.id', + 'block.name', + 'block.type', + 'chat.active_messages_bytes', + 'chat.active_messages_count', + 'chat.append_bytes', + 'chat.append_count', + 'chat.artifact_keys', + 'chat.artifacts_bytes', + 'chat.auth_type', + 'chat.context_count', + 'chat.context_usage', + 'chat.continuation.messages_before', + 'chat.continuation.tool_result_bytes', + 'chat.continuation.tool_result_failure', + 'chat.continuation.tool_result_success', + 'chat.continuation.tool_results', + 'chat.continuation.total_tool_calls', + 'chat.existing_message_count', + 'chat.file_attachment_count', + 
'chat.finalize.outcome', + 'chat.found', + 'chat.has_assistant_message', + 'chat.has_output_configs', + 'chat.id', + 'chat.message_bytes', + 'chat.messages_after', + 'chat.messages_bytes', + 'chat.messages_count', + 'chat.persist.outcome', + 'chat.preexisting', + 'chat.rollback_index', + 'chat.tokens_used', + 'chat.type', + 'chat.user_message_id', + 'checkpoint.age', + 'checkpoint.attempts_bytes', + 'checkpoint.bytes.assistant_tool_use', + 'checkpoint.bytes.current_messages', + 'checkpoint.bytes.immediate_results', + 'checkpoint.bytes.pending_tool_calls', + 'checkpoint.bytes.provider_request', + 'checkpoint.bytes.request_context', + 'checkpoint.bytes.tool_usage', + 'checkpoint.cached_credentials_bytes', + 'checkpoint.claimed', + 'checkpoint.claimed_now', + 'checkpoint.completed_bytes', + 'checkpoint.completed_steps', + 'checkpoint.current_messages', + 'checkpoint.decisions_bytes', + 'checkpoint.found', + 'checkpoint.frames', + 'checkpoint.id', + 'checkpoint.immediate_results', + 'checkpoint.message_id', + 'checkpoint.pending_bytes', + 'checkpoint.pending_steps', + 'checkpoint.pending_tool_count', + 'checkpoint.rows', + 'checkpoint.task_id', + 'checkpoint.total_tool_calls', + 'checkpoint.workflow_snapshot_bytes', + 'client.version', + 'condition.id', + 'condition.name', + 'condition.result', + 'context.reduce.budget_chars', + 'context.reduce.caller', + 'context.reduce.did_reduce', + 'context.reduce.input_chars', + 'context.reduce.input_messages', + 'context.reduce.outcome', + 'context.reduce.output_chars', + 'context.reduce.output_messages', + 'context.reduced', + 'context.summarize.input_chars', + 'context.summarize.output_chars', + 'copilot.abort.go_marker_ok', + 'copilot.abort.local_aborted', + 'copilot.abort.outcome', + 'copilot.async_tool.claimed_by', + 'copilot.async_tool.has_error', + 'copilot.async_tool.ids_count', + 'copilot.async_tool.status', + 'copilot.async_tool.worker_id', + 'copilot.branch.kind', + 'copilot.chat.is_new', + 'copilot.commands.count', + 
'copilot.confirm.outcome', + 'copilot.contexts.count', + 'copilot.file_attachments.count', + 'copilot.finalize.outcome', + 'copilot.interrupted_prior_stream', + 'copilot.leg', + 'copilot.mode', + 'copilot.operation', + 'copilot.output_file.bytes', + 'copilot.output_file.format', + 'copilot.output_file.id', + 'copilot.output_file.name', + 'copilot.output_file.outcome', + 'copilot.pending_stream.wait_ms', + 'copilot.prefetch', + 'copilot.publisher.client_disconnected', + 'copilot.publisher.saw_complete', + 'copilot.recovery.latest_seq', + 'copilot.recovery.oldest_seq', + 'copilot.recovery.outcome', + 'copilot.recovery.requested_after_seq', + 'copilot.request.outcome', + 'copilot.resource_attachments.count', + 'copilot.resources.aborted', + 'copilot.resources.op', + 'copilot.resources.removed_count', + 'copilot.resources.upserted_count', + 'copilot.result.content_blocks', + 'copilot.result.content_length', + 'copilot.result.tool_calls', + 'copilot.resume.after_cursor', + 'copilot.resume.duration_ms', + 'copilot.resume.event_count', + 'copilot.resume.outcome', + 'copilot.resume.poll_iterations', + 'copilot.resume.preview_session_count', + 'copilot.route', + 'copilot.run.agent', + 'copilot.run.has_completed_at', + 'copilot.run.has_error', + 'copilot.run.model', + 'copilot.run.parent_id', + 'copilot.run.provider', + 'copilot.run.status', + 'copilot.stop.appended_assistant', + 'copilot.stop.blocks_count', + 'copilot.stop.content_length', + 'copilot.stop.outcome', + 'copilot.stream', + 'copilot.surface', + 'copilot.table.id', + 'copilot.table.outcome', + 'copilot.table.row_count', + 'copilot.table.source.content_bytes', + 'copilot.table.source.format', + 'copilot.table.source.path', + 'copilot.trace.span_count', + 'copilot.transport', + 'copilot.user.message_preview', + 'copilot.validate.outcome', + 'copilot.vfs.file.extension', + 'copilot.vfs.file.media_type', + 'copilot.vfs.file.name', + 'copilot.vfs.file.size_bytes', + 'copilot.vfs.has_alpha', + 
'copilot.vfs.input.bytes', + 'copilot.vfs.input.height', + 'copilot.vfs.input.media_type_claimed', + 'copilot.vfs.input.media_type_detected', + 'copilot.vfs.input.width', + 'copilot.vfs.metadata.failed', + 'copilot.vfs.outcome', + 'copilot.vfs.output.bytes', + 'copilot.vfs.output.media_type', + 'copilot.vfs.read.image.resized', + 'copilot.vfs.read.outcome', + 'copilot.vfs.read.output.bytes', + 'copilot.vfs.read.output.lines', + 'copilot.vfs.read.output.media_type', + 'copilot.vfs.read.path', + 'copilot.vfs.resize.attempts', + 'copilot.vfs.resize.chosen_dimension', + 'copilot.vfs.resize.chosen_quality', + 'copilot.vfs.resized', + 'copilot.vfs.sharp.load_failed', + 'cost.default_cost', + 'credential_set.id', + 'credential_set.name', + 'db.operation', + 'db.sql.table', + 'db.system', + 'deployment.environment', + 'deployment.version', + 'document.file_size', + 'document.mime_type', + 'documents.count', + 'documents.upload_type', + 'error', + 'error.code', + 'error.internal', + 'error.type', + 'event.name', + 'event.timestamp', + 'execution.blocks_executed', + 'execution.duration_ms', + 'execution.error_message', + 'execution.has_errors', + 'execution.status', + 'execution.total_cost', + 'execution.trigger', + 'function.execution_time_ms', + 'function.id', + 'function.name', + 'gen_ai.agent.id', + 'gen_ai.agent.name', + 'gen_ai.cost.input', + 'gen_ai.cost.output', + 'gen_ai.cost.total', + 'gen_ai.input.messages', + 'gen_ai.operation.name', + 'gen_ai.output.messages', + 'gen_ai.request.assistant_messages', + 'gen_ai.request.content_blocks', + 'gen_ai.request.has_cache_control', + 'gen_ai.request.image_blocks', + 'gen_ai.request.image_data_bytes', + 'gen_ai.request.max_message_blocks', + 'gen_ai.request.messages.count', + 'gen_ai.request.model', + 'gen_ai.request.system_chars', + 'gen_ai.request.text_blocks', + 'gen_ai.request.tool_result_blocks', + 'gen_ai.request.tool_use_blocks', + 'gen_ai.request.tools.count', + 'gen_ai.request.user_messages', + 'gen_ai.system', + 
'gen_ai.tool.name', + 'gen_ai.usage.cache_creation_tokens', + 'gen_ai.usage.cache_read_tokens', + 'gen_ai.usage.input_tokens', + 'gen_ai.usage.output_tokens', + 'gen_ai.usage.total_tokens', + 'gen_ai.workflow.execution_id', + 'gen_ai.workflow.id', + 'gen_ai.workflow.name', + 'hosted_key.env_var', + 'http.host', + 'http.method', + 'http.path', + 'http.remote_addr', + 'http.request.content_length', + 'http.response.body_bytes', + 'http.response.content_length', + 'http.response.headers_ms', + 'http.response.total_ms', + 'http.server.duration_ms', + 'http.status_code', + 'http.target', + 'http.url', + 'http.user_agent', + 'invitation.role', + 'knowledge_base.id', + 'knowledge_base.name', + 'llm.error_stage', + 'llm.request.body_bytes', + 'llm.stream.bytes', + 'llm.stream.chunks', + 'llm.stream.first_chunk_bytes', + 'llm.stream.first_chunk_ms', + 'llm.stream.open_ms', + 'llm.stream.total_ms', + 'lock.acquired', + 'lock.backend', + 'lock.timed_out', + 'lock.timeout_ms', + 'loop.id', + 'loop.iterations', + 'loop.name', + 'mcp.execution_status', + 'mcp.server_id', + 'mcp.server_name', + 'mcp.tool_name', + 'mcp.transport', + 'member.role', + 'memory.content_bytes', + 'memory.found', + 'memory.path', + 'memory.row_count', + 'message.id', + 'messaging.destination.name', + 'messaging.system', + 'model.duration_ms', + 'model.id', + 'model.name', + 'mothership.origin', + 'net.peer.name', + 'oauth.provider', + 'parallel.branches', + 'parallel.id', + 'parallel.name', + 'prefs.tool_count', + 'processing.chunk_size', + 'processing.recipe', + 'provider.id', + 'rate_limit.attempt', + 'rate_limit.count', + 'rate_limit.delay_ms', + 'rate_limit.limit', + 'rate_limit.max_retries', + 'rate_limit.outcome', + 'rate_limit.retry_after_ms', + 'request.go_trace_id', + 'request.id', + 'required.version', + 'resume.request.body_bytes', + 'resume.results.count', + 'resume.results.data_bytes', + 'resume.results.failure_count', + 'resume.results.success_count', + 'router.backend_name', + 
'router.bedrock_enabled', + 'router.bedrock_supported_model', + 'router.id', + 'router.name', + 'router.selected_backend', + 'router.selected_path', + 'run.id', + 'search.results_count', + 'service.instance.id', + 'service.name', + 'service.namespace', + 'service.version', + 'settle.completed', + 'settle.timeout_ms', + 'settle.wait_ms', + 'sim.operation', + 'sim.request_id', + 'span.duration_ms', + 'span.status', + 'span.type', + 'stream.id', + 'subagent.id', + 'subagent.outcome.content_bytes', + 'subagent.outcome.error', + 'subagent.outcome.structured_type', + 'subagent.outcome.success', + 'subagent.outcome.tool_call_count', + 'task.age', + 'task.decision_count', + 'task.error_count', + 'task.found', + 'task.id', + 'task.list_limit', + 'task.rows', + 'task.status', + 'task.step_count', + 'telemetry.sdk.language', + 'telemetry.sdk.name', + 'telemetry.sdk.version', + 'template.id', + 'template.name', + 'throttle.reason', + 'tool.args.bytes', + 'tool.args.count', + 'tool.args.preview', + 'tool.async_waiter.poll_count', + 'tool.async_waiter.pubsub_deliveries', + 'tool.async_waiter.resolution', + 'tool.call_id', + 'tool.client_executable', + 'tool.completion.received', + 'tool.confirmation.status', + 'tool.duration_ms', + 'tool.error_kind', + 'tool.executor', + 'tool.external.service', + 'tool.id', + 'tool.name', + 'tool.outcome', + 'tool.outcome.message', + 'tool.parent_span', + 'tool.payload.bytes', + 'tool.result.artifact', + 'tool.result.bytes', + 'tool.result.success', + 'tool.scheduled', + 'tool.status', + 'tool.status_code', + 'tool.store_status', + 'tool.sync', + 'tool.timeout_ms', + 'trace.aborted', + 'trace.billed_total_cost', + 'trace.cache_read_tokens', + 'trace.cache_write_tokens', + 'trace.duration_ms', + 'trace.error', + 'trace.go_id', + 'trace.input_tokens', + 'trace.model', + 'trace.outcome', + 'trace.output_tokens', + 'trace.provider', + 'trace.raw_total_cost', + 'trace.span_count', + 'trace.tool_call_count', + 'user.auth_method', + 
'user.auth_provider', + 'user.id', + 'webhook.id', + 'webhook.provider', + 'webhook.trigger_success', + 'workflow.block_types', + 'workflow.blocks_count', + 'workflow.created_id', + 'workflow.duration_ms', + 'workflow.edges_count', + 'workflow.execution_id', + 'workflow.has_folder', + 'workflow.has_workspace', + 'workflow.id', + 'workflow.loops_count', + 'workflow.name', + 'workflow.new_id', + 'workflow.parallels_count', + 'workflow.source_id', + 'workflow.trigger', + 'workspace.id', + 'workspace.name', +] as const diff --git a/apps/sim/lib/copilot/generated/trace-spans-v1.ts b/apps/sim/lib/copilot/generated/trace-spans-v1.ts index 8785699f918..1234b81d3a0 100644 --- a/apps/sim/lib/copilot/generated/trace-spans-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-spans-v1.ts @@ -11,8 +11,15 @@ export const TraceSpan = { AnthropicCountTokens: 'anthropic.count_tokens', AsyncToolStoreSet: 'async_tool_store.set', + AuthRateLimitRecord: 'auth.rate_limit.record', AuthValidateKey: 'auth.validate_key', ChatContinueWithToolResults: 'chat.continue_with_tool_results', + ChatExplicitAbortConsume: 'chat.explicit_abort.consume', + ChatExplicitAbortMark: 'chat.explicit_abort.mark', + ChatExplicitAbortPeek: 'chat.explicit_abort.peek', + ChatGateAcquire: 'chat.gate.acquire', + ChatPersistAfterDone: 'chat.persist_after_done', + ChatSetup: 'chat.setup', ContextReduce: 'context.reduce', ContextSummarizeChunk: 'context.summarize_chunk', CopilotAnalyticsFlush: 'copilot.analytics.flush', @@ -33,12 +40,24 @@ export const TraceSpan = { CopilotAsyncRunsUpsertAsyncToolCall: 'copilot.async_runs.upsert_async_tool_call', CopilotAuthValidateApiKey: 'copilot.auth.validate_api_key', CopilotBillingUpdateCost: 'copilot.billing.update_cost', + CopilotChatAbortStream: 'copilot.chat.abort_stream', + CopilotChatAbortWaitSettle: 'copilot.chat.abort_wait_settle', + CopilotChatAcquirePendingStreamLock: 'copilot.chat.acquire_pending_stream_lock', + CopilotChatBuildExecutionContext: 
'copilot.chat.build_execution_context', + CopilotChatBuildPayload: 'copilot.chat.build_payload', + CopilotChatBuildWorkspaceContext: 'copilot.chat.build_workspace_context', CopilotChatFinalizeAssistantTurn: 'copilot.chat.finalize_assistant_turn', CopilotChatPersistUserMessage: 'copilot.chat.persist_user_message', + CopilotChatResolveAgentContexts: 'copilot.chat.resolve_agent_contexts', + CopilotChatResolveBranch: 'copilot.chat.resolve_branch', + CopilotChatResolveOrCreateChat: 'copilot.chat.resolve_or_create_chat', + CopilotChatStopStream: 'copilot.chat.stop_stream', + CopilotConfirmToolResult: 'copilot.confirm.tool_result', CopilotFinalizeStream: 'copilot.finalize_stream', CopilotRecoveryCheckReplayGap: 'copilot.recovery.check_replay_gap', CopilotResumeRequest: 'copilot.resume.request', CopilotSubagentExecute: 'copilot.subagent.execute', + CopilotToolWaitForClientResult: 'copilot.tool.wait_for_client_result', CopilotToolsHandleResourceSideEffects: 'copilot.tools.handle_resource_side_effects', CopilotToolsWriteCsvToTable: 'copilot.tools.write_csv_to_table', CopilotToolsWriteOutputFile: 'copilot.tools.write_output_file', @@ -62,8 +81,15 @@ export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey] export const TraceSpanValues: readonly TraceSpanValue[] = [ 'anthropic.count_tokens', 'async_tool_store.set', + 'auth.rate_limit.record', 'auth.validate_key', 'chat.continue_with_tool_results', + 'chat.explicit_abort.consume', + 'chat.explicit_abort.mark', + 'chat.explicit_abort.peek', + 'chat.gate.acquire', + 'chat.persist_after_done', + 'chat.setup', 'context.reduce', 'context.summarize_chunk', 'copilot.analytics.flush', @@ -84,12 +110,24 @@ export const TraceSpanValues: readonly TraceSpanValue[] = [ 'copilot.async_runs.upsert_async_tool_call', 'copilot.auth.validate_api_key', 'copilot.billing.update_cost', + 'copilot.chat.abort_stream', + 'copilot.chat.abort_wait_settle', + 'copilot.chat.acquire_pending_stream_lock', + 'copilot.chat.build_execution_context', + 
'copilot.chat.build_payload', + 'copilot.chat.build_workspace_context', 'copilot.chat.finalize_assistant_turn', 'copilot.chat.persist_user_message', + 'copilot.chat.resolve_agent_contexts', + 'copilot.chat.resolve_branch', + 'copilot.chat.resolve_or_create_chat', + 'copilot.chat.stop_stream', + 'copilot.confirm.tool_result', 'copilot.finalize_stream', 'copilot.recovery.check_replay_gap', 'copilot.resume.request', 'copilot.subagent.execute', + 'copilot.tool.wait_for_client_result', 'copilot.tools.handle_resource_side_effects', 'copilot.tools.write_csv_to_table', 'copilot.tools.write_output_file', diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index f6fff746158..f0eac9d6e64 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -1,4 +1,5 @@ import { type Context, context, SpanStatusCode, trace } from '@opentelemetry/api' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { traceHeaders } from '@/lib/copilot/request/go/propagation' // Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated @@ -42,12 +43,12 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): spanName ?? `sim → go ${pathname}`, { attributes: { - 'http.method': method, - 'http.url': url, - 'http.target': pathname, - 'net.peer.name': parsed?.host ?? '', - 'copilot.leg': 'sim_to_go', - ...(operation ? { 'copilot.operation': operation } : {}), + [TraceAttr.HttpMethod]: method, + [TraceAttr.HttpUrl]: url, + [TraceAttr.HttpTarget]: pathname, + [TraceAttr.NetPeerName]: parsed?.host ?? '', + [TraceAttr.CopilotLeg]: 'sim_to_go', + ...(operation ? { [TraceAttr.CopilotOperation]: operation } : {}), ...(attributes ?? {}), }, }, @@ -72,10 +73,10 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): ) const elapsedMs = performance.now() - start const contentLength = Number(response.headers.get('content-length') ?? 
0) - span.setAttribute('http.status_code', response.status) - span.setAttribute('http.response.headers_ms', Math.round(elapsedMs)) + span.setAttribute(TraceAttr.HttpStatusCode, response.status) + span.setAttribute(TraceAttr.HttpResponseHeadersMs, Math.round(elapsedMs)) if (contentLength > 0) { - span.setAttribute('http.response.content_length', contentLength) + span.setAttribute(TraceAttr.HttpResponseContentLength, contentLength) } if (response.status >= 400) { span.setStatus({ @@ -87,7 +88,7 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): } return response } catch (error) { - span.setAttribute('http.response.headers_ms', Math.round(performance.now() - start)) + span.setAttribute(TraceAttr.HttpResponseHeadersMs, Math.round(performance.now() - start)) span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), diff --git a/apps/sim/lib/copilot/request/go/stream.ts b/apps/sim/lib/copilot/request/go/stream.ts index 95d80f50eca..aa0f0df2cea 100644 --- a/apps/sim/lib/copilot/request/go/stream.ts +++ b/apps/sim/lib/copilot/request/go/stream.ts @@ -2,6 +2,7 @@ import type { Context } from '@opentelemetry/api' import { createLogger } from '@sim/logger' import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants' import { MothershipStreamV1SpanLifecycleEvent } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { buildPreviewContentUpdate, @@ -128,8 +129,8 @@ export async function runStreamLoop( spanName: `sim → go ${pathname}`, operation: 'stream', attributes: { - 'copilot.stream': true, - ...(requestBodyBytes ? { 'http.request.content_length': requestBodyBytes } : {}), + [TraceAttr.CopilotStream]: true, + ...(requestBodyBytes ? 
{ [TraceAttr.HttpRequestContentLength]: requestBodyBytes } : {}), }, }) const headersElapsedMs = Math.round(performance.now() - fetchStart) diff --git a/apps/sim/lib/copilot/request/handlers/tool.ts b/apps/sim/lib/copilot/request/handlers/tool.ts index 17da4ecbccf..661335f53f0 100644 --- a/apps/sim/lib/copilot/request/handlers/tool.ts +++ b/apps/sim/lib/copilot/request/handlers/tool.ts @@ -7,6 +7,9 @@ import { MothershipStreamV1ToolOutcome, type MothershipStreamV1ToolResultPayload, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import { isToolArgsDeltaStreamEvent, isToolCallStreamEvent, @@ -362,35 +365,56 @@ async function dispatchToolExecution( } } else { toolCall.status = 'executing' - const pendingPromise = (async () => { - await upsertAsyncToolCall({ - runId: context.runId, - toolCallId, - toolName, - args, - status: MothershipStreamV1AsyncToolRecordStatus.running, - }).catch((err) => { - logger.warn(`Failed to persist async tool row for client-executable ${scopeLabel}tool`, { + // Span covers the entire "wait for browser/client to execute this + // tool and report back" window — typically the single largest + // non-LLM latency contributor for mothership requests that use + // client-side tools. Before this, the wait was uninstrumented and + // only visible as a gap in the waterfall. + const pendingPromise = withCopilotSpan( + TraceSpan.CopilotToolWaitForClientResult, + { + 'tool.name': toolName, + 'tool.call_id': toolCallId, + 'tool.timeout_ms': options.timeout || STREAM_TIMEOUT_MS, + ...(context.runId ? { 'run.id': context.runId } : {}), + }, + async (span) => { + await upsertAsyncToolCall({ + runId: context.runId, toolCallId, toolName, - error: err instanceof Error ? 
err.message : String(err), + args, + status: MothershipStreamV1AsyncToolRecordStatus.running, + }).catch((err) => { + logger.warn( + `Failed to persist async tool row for client-executable ${scopeLabel}tool`, + { + toolCallId, + toolName, + error: err instanceof Error ? err.message : String(err), + } + ) }) - }) - const completion = await waitForToolCompletion( - toolCallId, - options.timeout || STREAM_TIMEOUT_MS, - options.abortSignal - ) - handleClientCompletion(toolCall, toolCallId, completion) - await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options) - return ( - completion ?? { - status: MothershipStreamV1ToolOutcome.error, - message: 'Tool completion missing', - data: { error: 'Tool completion missing' }, + const completion = await waitForToolCompletion( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + span.setAttribute(TraceAttr.ToolCompletionReceived, completion !== undefined) + if (completion) { + span.setAttribute(TraceAttr.ToolOutcome, completion.status) } - ) - })().catch((err) => { + handleClientCompletion(toolCall, toolCallId, completion) + await emitSyntheticToolResult(toolCallId, toolCall.name, completion, options) + return ( + completion ?? 
{ + status: MothershipStreamV1ToolOutcome.error, + message: 'Tool completion missing', + data: { error: 'Tool completion missing' }, + } + ) + } + ).catch((err) => { logger.error(`Client-executable ${scopeLabel}tool wait failed`, { toolCallId, toolName, diff --git a/apps/sim/lib/copilot/request/lifecycle/finalize.ts b/apps/sim/lib/copilot/request/lifecycle/finalize.ts index 12cc789abc5..229a8ae309b 100644 --- a/apps/sim/lib/copilot/request/lifecycle/finalize.ts +++ b/apps/sim/lib/copilot/request/lifecycle/finalize.ts @@ -5,6 +5,7 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import type { StreamWriter } from '@/lib/copilot/request/session' import type { OrchestratorResult } from '@/lib/copilot/request/types' @@ -27,14 +28,14 @@ export async function finalizeStream( const outcome = aborted ? 'aborted' : result.success ? 'success' : 'error' const span = getTracer().startSpan('copilot.finalize_stream', { attributes: { - 'copilot.finalize.outcome': outcome, + [TraceAttr.CopilotFinalizeOutcome]: outcome, 'copilot.run.id': runId, 'copilot.request.id': requestId, - 'copilot.result.tool_calls': result.toolCalls?.length ?? 0, - 'copilot.result.content_blocks': result.contentBlocks?.length ?? 0, - 'copilot.result.content_length': result.content?.length ?? 0, - 'copilot.publisher.saw_complete': publisher.sawComplete, - 'copilot.publisher.client_disconnected': publisher.clientDisconnected, + [TraceAttr.CopilotResultToolCalls]: result.toolCalls?.length ?? 0, + [TraceAttr.CopilotResultContentBlocks]: result.contentBlocks?.length ?? 0, + [TraceAttr.CopilotResultContentLength]: result.content?.length ?? 
0, + [TraceAttr.CopilotPublisherSawComplete]: publisher.sawComplete, + [TraceAttr.CopilotPublisherClientDisconnected]: publisher.clientDisconnected, }, }) try { diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index 7541fd231ce..618cc8b7108 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -10,6 +10,7 @@ import { MothershipStreamV1SessionKind, } from '@/lib/copilot/generated/mothership-stream-v1' import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { finalizeStream } from '@/lib/copilot/request/lifecycle/finalize' import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' @@ -275,15 +276,15 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS rootOutcome = outcome if (lifecycleResult?.usage) { activeOtelRoot.span.setAttributes({ - 'gen_ai.usage.input_tokens': lifecycleResult.usage.prompt ?? 0, - 'gen_ai.usage.output_tokens': lifecycleResult.usage.completion ?? 0, + [TraceAttr.GenAiUsageInputTokens]: lifecycleResult.usage.prompt ?? 0, + [TraceAttr.GenAiUsageOutputTokens]: lifecycleResult.usage.completion ?? 0, }) } if (lifecycleResult?.cost) { activeOtelRoot.span.setAttributes({ - 'billing.cost.input_usd': lifecycleResult.cost.input ?? 0, - 'billing.cost.output_usd': lifecycleResult.cost.output ?? 0, - 'billing.cost.total_usd': lifecycleResult.cost.total ?? 0, + [TraceAttr.BillingCostInputUsd]: lifecycleResult.cost.input ?? 0, + [TraceAttr.BillingCostOutputUsd]: lifecycleResult.cost.output ?? 0, + [TraceAttr.BillingCostTotalUsd]: lifecycleResult.cost.total ?? 
0, }) } } @@ -395,8 +396,8 @@ export async function requestChatTitle(params: { spanName: 'sim → go /api/generate-chat-title', operation: 'generate_chat_title', attributes: { - 'gen_ai.request.model': model, - ...(provider ? { 'gen_ai.system': provider } : {}), + [TraceAttr.GenAiRequestModel]: model, + ...(provider ? { [TraceAttr.GenAiSystem]: provider } : {}), }, }) diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index f4d57b51688..41c4a5dd42d 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -11,6 +11,7 @@ import { trace, } from '@opentelemetry/api' import type { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' @@ -119,7 +120,7 @@ function setAgentInputMessages(span: Span, input: CopilotAgentInputMessages): vo } const serialized = marshalAgentMessages(messages) if (serialized) { - span.setAttribute('gen_ai.input.messages', serialized) + span.setAttribute(TraceAttr.GenAiInputMessages, serialized) } } @@ -140,7 +141,7 @@ function setAgentOutputMessages(span: Span, output: CopilotAgentOutputMessages): if (parts.length === 0) return const serialized = marshalAgentMessages([{ role: 'assistant', parts }]) if (serialized) { - span.setAttribute('gen_ai.output.messages', serialized) + span.setAttribute(TraceAttr.GenAiOutputMessages, serialized) } } @@ -289,13 +290,15 @@ export async function withCopilotToolSpan( `tool.execute ${input.toolName}`, { attributes: { - 'tool.name': input.toolName, - 'tool.call_id': input.toolCallId, - 'tool.executor': 'sim', - ...(input.runId ? { 'run.id': input.runId } : {}), - ...(input.chatId ? { 'chat.id': input.chatId } : {}), - ...(typeof input.argsBytes === 'number' ? 
{ 'tool.args.bytes': input.argsBytes } : {}), - ...(input.argsPreview ? { 'tool.args.preview': input.argsPreview } : {}), + [TraceAttr.ToolName]: input.toolName, + [TraceAttr.ToolCallId]: input.toolCallId, + [TraceAttr.ToolExecutor]: 'sim', + ...(input.runId ? { [TraceAttr.RunId]: input.runId } : {}), + ...(input.chatId ? { [TraceAttr.ChatId]: input.chatId } : {}), + ...(typeof input.argsBytes === 'number' + ? { [TraceAttr.ToolArgsBytes]: input.argsBytes } + : {}), + ...(input.argsPreview ? { [TraceAttr.ToolArgsPreview]: input.argsPreview } : {}), }, }, async (span) => { @@ -343,8 +346,26 @@ export interface CopilotOtelScope { runId?: string streamId?: string transport: 'headless' | 'stream' + /** + * First ~500 chars of the user's prompt, surfaced as + * `copilot.user.message_preview` on the root span. Lets dashboards + * show a "what was this request about" column without having to + * parse the full `gen_ai.input.messages` JSON attribute (which is + * also gated on a separate env var). Safe even when full-content + * capture is off — a preview snippet is useful for operators + * scanning trace lists, low-risk relative to full prompts. + */ + userMessagePreview?: string } +/** + * Max characters kept in `copilot.user.message_preview`. Chosen to + * fit in a dashboard table cell without truncation (most Grafana + * table cells render ~300 chars before wrapping), but long enough + * to disambiguate requests in triage. + */ +const USER_MESSAGE_PREVIEW_MAX_CHARS = 500 + /** * Build the canonical `gen_ai.agent.execute` attribute set from a scope. * Shared between `withCopilotOtelContext` (fully-managed lifetime) and @@ -354,6 +375,7 @@ export interface CopilotOtelScope { function buildAgentSpanAttributes( scope: CopilotOtelScope ): Record { + const preview = truncateUserMessagePreview(scope.userMessagePreview) return { 'gen_ai.agent.name': 'mothership', 'gen_ai.agent.id': scope.transport === 'stream' ? 
'mothership-stream' : 'mothership-headless', @@ -367,9 +389,24 @@ function buildAgentSpanAttributes( ...(scope.executionId ? { 'workflow.execution_id': scope.executionId } : {}), ...(scope.runId ? { 'run.id': scope.runId } : {}), ...(scope.streamId ? { 'stream.id': scope.streamId } : {}), + ...(preview ? { 'copilot.user.message_preview': preview } : {}), } } +/** + * Collapse newlines and trim the user's prompt to a fixed length so + * it fits cleanly in a single dashboard table cell. Non-strings are + * ignored (the chat schema enforces string, but this is defensive + * against upstream shape changes). + */ +function truncateUserMessagePreview(raw: unknown): string | undefined { + if (typeof raw !== 'string') return undefined + const collapsed = raw.replace(/\s+/g, ' ').trim() + if (!collapsed) return undefined + if (collapsed.length <= USER_MESSAGE_PREVIEW_MAX_CHARS) return collapsed + return `${collapsed.slice(0, USER_MESSAGE_PREVIEW_MAX_CHARS - 1)}…` +} + /** * Start a `gen_ai.agent.execute` root span with manually-managed * lifetime. Returns the span, its context, and a `finish` callback the @@ -388,6 +425,52 @@ function buildAgentSpanAttributes( * async function (e.g. headless invoke) — it handles the lifecycle for * you. */ +/** + * Request-shape metadata that's only known AFTER the branch resolves + * (can't be set at startCopilotOtelRoot time). Stamped on the root + * `gen_ai.agent.execute` span so dashboards can slice requests by how + * they were sent: which product surface, which mode, which model, with + * attachments or not, and whether the request arrived while a prior + * stream was still alive (i.e. user hit send-to-interrupt). + */ +export interface CopilotOtelRequestShape { + /** + * Product surface. Derived from `branch.kind` — "workflow" means the + * copilot sidebar (attached to a specific workflow), "workspace" + * means the mothership workspace-level chat. Also stamped as a + * human-friendly `copilot.surface` (`copilot` | `mothership`). 
+ */ + branchKind?: 'workflow' | 'workspace' + /** Mothership request mode — `agent`, `ask`, `build`, etc. */ + mode?: string + /** LLM model identifier the caller selected. */ + model?: string + /** LLM provider the caller selected (`anthropic`, `openai`, …). */ + provider?: string + /** Whether this POST created a brand-new chat. */ + createNewChat?: boolean + /** `true` when the caller sent `prefetch: true` (UI speculative send). */ + prefetch?: boolean + /** How many file attachments were present. */ + fileAttachmentsCount?: number + /** How many resource attachments (workspace files, knowledge, …). */ + resourceAttachmentsCount?: number + /** Free-form context blocks the caller attached. */ + contextsCount?: number + /** Explicit commands (e.g. slash commands) present in the request. */ + commandsCount?: number + /** + * Time spent waiting for the per-chat stream lock, in ms. Values + * above ~50ms strongly imply this request arrived while a prior + * stream for the same chat was still in flight (i.e. user pressed + * send-to-interrupt, or a tab refresh overlapped with an active + * request). + */ + pendingStreamWaitMs?: number + /** True if `pendingStreamWaitMs` was non-trivially long. */ + interruptedPriorStream?: boolean +} + export interface CopilotOtelRoot { span: Span context: Context @@ -406,6 +489,13 @@ export interface CopilotOtelRoot { * invoked tool calls are known. */ setOutputMessages: (output: CopilotAgentOutputMessages) => void + /** + * Stamp request-shape attributes that are only known after the + * branch resolves (mode, provider, model, surface, attachment + * counts, interrupt signal). Safe to call multiple times — later + * calls override earlier ones for the same key. 
+ */ + setRequestShape: (shape: CopilotOtelRequestShape) => void } export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { @@ -431,7 +521,7 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { const finish: CopilotOtelRoot['finish'] = (outcome, error) => { if (finished) return finished = true - span.setAttribute('copilot.request.outcome', outcome) + span.setAttribute(TraceAttr.CopilotRequestOutcome, outcome ?? 'success') if (error) { span.setStatus({ code: SpanStatusCode.ERROR, @@ -450,6 +540,57 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { finish, setInputMessages: (input) => setAgentInputMessages(span, input), setOutputMessages: (output) => setAgentOutputMessages(span, output), + setRequestShape: (shape) => applyRequestShape(span, shape), + } +} + +/** + * Threshold (ms) above which we consider a pending-stream-lock wait + * to indicate this request interrupted a prior in-flight stream. Well + * above the typical uncontested acquire (<10ms) but below any normal + * human-caused delay. Tuned to flag overlap cases — not perfect, but + * useful for filtering dashboards. + */ +const INTERRUPT_WAIT_MS_THRESHOLD = 50 + +function applyRequestShape(span: Span, shape: CopilotOtelRequestShape): void { + if (shape.branchKind) { + span.setAttribute(TraceAttr.CopilotBranchKind, shape.branchKind) + span.setAttribute( + TraceAttr.CopilotSurface, + shape.branchKind === 'workflow' ? 
'copilot' : 'mothership' + ) + } + if (shape.mode) span.setAttribute(TraceAttr.CopilotMode, shape.mode) + if (shape.model) span.setAttribute(TraceAttr.GenAiRequestModel, shape.model) + if (shape.provider) span.setAttribute(TraceAttr.GenAiSystem, shape.provider) + if (typeof shape.createNewChat === 'boolean') { + span.setAttribute(TraceAttr.CopilotChatIsNew, shape.createNewChat) + } + if (typeof shape.prefetch === 'boolean') { + span.setAttribute(TraceAttr.CopilotPrefetch, shape.prefetch) + } + if (typeof shape.fileAttachmentsCount === 'number') { + span.setAttribute(TraceAttr.CopilotFileAttachmentsCount, shape.fileAttachmentsCount) + } + if (typeof shape.resourceAttachmentsCount === 'number') { + span.setAttribute(TraceAttr.CopilotResourceAttachmentsCount, shape.resourceAttachmentsCount) + } + if (typeof shape.contextsCount === 'number') { + span.setAttribute(TraceAttr.CopilotContextsCount, shape.contextsCount) + } + if (typeof shape.commandsCount === 'number') { + span.setAttribute(TraceAttr.CopilotCommandsCount, shape.commandsCount) + } + if (typeof shape.pendingStreamWaitMs === 'number') { + span.setAttribute(TraceAttr.CopilotPendingStreamWaitMs, shape.pendingStreamWaitMs) + const interrupted = + typeof shape.interruptedPriorStream === 'boolean' + ? 
shape.interruptedPriorStream + : shape.pendingStreamWaitMs > INTERRUPT_WAIT_MS_THRESHOLD + span.setAttribute(TraceAttr.CopilotInterruptedPriorStream, interrupted) + } else if (typeof shape.interruptedPriorStream === 'boolean') { + span.setAttribute(TraceAttr.CopilotInterruptedPriorStream, shape.interruptedPriorStream) } } diff --git a/apps/sim/lib/copilot/request/session/abort.ts b/apps/sim/lib/copilot/request/session/abort.ts index 3502f4a69f8..bbd1cd19695 100644 --- a/apps/sim/lib/copilot/request/session/abort.ts +++ b/apps/sim/lib/copilot/request/session/abort.ts @@ -1,4 +1,7 @@ import { createLogger } from '@sim/logger' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { withCopilotSpan } from '@/lib/copilot/request/otel' import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis' import { clearAbortMarker, hasAbortMarker, writeAbortMarker } from './buffer' @@ -120,59 +123,80 @@ export async function acquirePendingChatStream( streamId: string, timeoutMs = 5_000 ): Promise { - const redis = getRedisClient() - if (redis) { - const deadline = Date.now() + timeoutMs - for (;;) { - try { - const acquired = await acquireLock( - getChatStreamLockKey(chatId), - streamId, - CHAT_STREAM_LOCK_TTL_SECONDS - ) - if (acquired) { - registerPendingChatStream(chatId, streamId) - return true - } - if (!pendingChatStreams.has(chatId)) { - const ownerStreamId = await redis.get(getChatStreamLockKey(chatId)) - if (ownerStreamId) { - const settled = await waitForPendingChatStream(chatId, 0, ownerStreamId) - if (settled) { - continue + // Span records wall time spent waiting for the per-chat stream lock. + // Typical case: sub-10ms uncontested acquire. Worst case: up to + // `timeoutMs` spent polling while a prior stream finishes. Previously + // this time looked like "unexplained gap before llm.stream". 
+ return withCopilotSpan( + TraceSpan.CopilotChatAcquirePendingStreamLock, + { + 'chat.id': chatId, + 'stream.id': streamId, + 'lock.timeout_ms': timeoutMs, + }, + async (span) => { + const redis = getRedisClient() + span.setAttribute(TraceAttr.LockBackend, redis ? 'redis' : 'in_process') + if (redis) { + const deadline = Date.now() + timeoutMs + for (;;) { + try { + const acquired = await acquireLock( + getChatStreamLockKey(chatId), + streamId, + CHAT_STREAM_LOCK_TTL_SECONDS + ) + if (acquired) { + registerPendingChatStream(chatId, streamId) + span.setAttribute(TraceAttr.LockAcquired, true) + return true + } + if (!pendingChatStreams.has(chatId)) { + const ownerStreamId = await redis.get(getChatStreamLockKey(chatId)) + if (ownerStreamId) { + const settled = await waitForPendingChatStream(chatId, 0, ownerStreamId) + if (settled) { + continue + } + } } + } catch (error) { + logger.warn('Failed to acquire chat stream lock', { + chatId, + streamId, + error: error instanceof Error ? error.message : String(error), + }) } - } - } catch (error) { - logger.warn('Failed to acquire chat stream lock', { - chatId, - streamId, - error: error instanceof Error ? 
error.message : String(error), - }) - } - if (Date.now() >= deadline) { - return false + if (Date.now() >= deadline) { + span.setAttribute(TraceAttr.LockAcquired, false) + span.setAttribute(TraceAttr.LockTimedOut, true) + return false + } + await new Promise((resolve) => setTimeout(resolve, 200)) + } } - await new Promise((resolve) => setTimeout(resolve, 200)) - } - } - for (;;) { - const existing = pendingChatStreams.get(chatId) - if (!existing) { - registerPendingChatStream(chatId, streamId) - return true - } + for (;;) { + const existing = pendingChatStreams.get(chatId) + if (!existing) { + registerPendingChatStream(chatId, streamId) + span.setAttribute(TraceAttr.LockAcquired, true) + return true + } - const settled = await Promise.race([ - existing.promise.then(() => true), - new Promise((resolve) => setTimeout(() => resolve(false), timeoutMs)), - ]) - if (!settled) { - return false + const settled = await Promise.race([ + existing.promise.then(() => true), + new Promise((resolve) => setTimeout(() => resolve(false), timeoutMs)), + ]) + if (!settled) { + span.setAttribute(TraceAttr.LockAcquired, false) + span.setAttribute(TraceAttr.LockTimedOut, true) + return false + } + } } - } + ) } /** diff --git a/apps/sim/lib/copilot/request/session/explicit-abort.ts b/apps/sim/lib/copilot/request/session/explicit-abort.ts index df080c4a3c8..c5356050307 100644 --- a/apps/sim/lib/copilot/request/session/explicit-abort.ts +++ b/apps/sim/lib/copilot/request/session/explicit-abort.ts @@ -1,5 +1,6 @@ import type { Context } from '@opentelemetry/api' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' import { env } from '@/lib/core/config/env' @@ -45,7 +46,7 @@ export async function requestExplicitStreamAbort(params: { operation: 'explicit_abort', attributes: { 'copilot.stream.id': streamId, - ...(chatId ? 
{ 'chat.id': chatId } : {}), + ...(chatId ? { [TraceAttr.ChatId]: chatId } : {}), }, }) diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 56d2ab15481..3dcc3b341f1 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -3,6 +3,7 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import { getLatestSeq, getOldestSeq, readEvents } from './buffer' @@ -38,8 +39,8 @@ export async function checkForReplayGap( const oldestSeq = await getOldestSeq(streamId) const latestSeq = await getLatestSeq(streamId) span.setAttributes({ - 'copilot.recovery.oldest_seq': oldestSeq ?? -1, - 'copilot.recovery.latest_seq': latestSeq ?? -1, + [TraceAttr.CopilotRecoveryOldestSeq]: oldestSeq ?? -1, + [TraceAttr.CopilotRecoveryLatestSeq]: latestSeq ?? 
-1, }) if ( @@ -55,7 +56,7 @@ export async function checkForReplayGap( oldestAvailableSeq: oldestSeq, latestSeq, }) - span.setAttribute('copilot.recovery.outcome', 'gap_detected') + span.setAttribute(TraceAttr.CopilotRecoveryOutcome, 'gap_detected') const gapEnvelope = createEvent({ streamId, @@ -91,7 +92,7 @@ export async function checkForReplayGap( } } - span.setAttribute('copilot.recovery.outcome', 'in_range') + span.setAttribute(TraceAttr.CopilotRecoveryOutcome, 'in_range') return null } ) diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index 933e922ad80..7f36dcba97c 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -5,6 +5,7 @@ import { MothershipStreamV1EventType, MothershipStreamV1SpanPayloadKind, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { createStreamingContext } from '@/lib/copilot/request/context/request-context' import { buildToolCallSummaries } from '@/lib/copilot/request/context/result' @@ -66,13 +67,15 @@ export async function orchestrateSubagentStream( async (otelSpan) => { const result = await orchestrateSubagentStreamInner(agentId, requestPayload, options) otelSpan.setAttributes({ - 'subagent.outcome.success': result.success, - 'subagent.outcome.tool_call_count': result.toolCalls.length, - 'subagent.outcome.content_bytes': result.content?.length ?? 0, + [TraceAttr.SubagentOutcomeSuccess]: result.success, + [TraceAttr.SubagentOutcomeToolCallCount]: result.toolCalls.length, + [TraceAttr.SubagentOutcomeContentBytes]: result.content?.length ?? 0, ...(result.structuredResult?.type - ? { 'subagent.outcome.structured_type': result.structuredResult.type } + ? { [TraceAttr.SubagentOutcomeStructuredType]: result.structuredResult.type } + : {}), + ...(result.error + ? 
{ [TraceAttr.SubagentOutcomeError]: String(result.error).slice(0, 500) } : {}), - ...(result.error ? { 'subagent.outcome.error': String(result.error).slice(0, 500) } : {}), }) return result } diff --git a/apps/sim/lib/copilot/request/tools/executor.ts b/apps/sim/lib/copilot/request/tools/executor.ts index 55b358221df..47d08fb7b4e 100644 --- a/apps/sim/lib/copilot/request/tools/executor.ts +++ b/apps/sim/lib/copilot/request/tools/executor.ts @@ -17,6 +17,7 @@ import { MothershipStreamV1ToolPhase, } from '@/lib/copilot/generated/mothership-stream-v1' import { CreateWorkflow } from '@/lib/copilot/generated/tool-catalog-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm' import { withCopilotToolSpan } from '@/lib/copilot/request/otel' import { markToolResultSeen } from '@/lib/copilot/request/sse-utils' @@ -259,9 +260,12 @@ export async function executeToolAndReport( }, async (otelSpan) => { const completion = await executeToolAndReportInner(toolCall, context, execContext, options) - otelSpan.setAttribute('tool.outcome', completion.status) + otelSpan.setAttribute(TraceAttr.ToolOutcome, completion.status) if (completion.message) { - otelSpan.setAttribute('tool.outcome.message', String(completion.message).slice(0, 500)) + otelSpan.setAttribute( + TraceAttr.ToolOutcomeMessage, + String(completion.message).slice(0, 500) + ) } return completion } diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 4bfb35d00b3..867c4e1fe55 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -1,5 +1,6 @@ import { createLogger } from '@sim/logger' import { FunctionExecute, UserTable } from '@/lib/copilot/generated/tool-catalog-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { 
withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' @@ -162,8 +163,8 @@ export async function maybeWriteOutputToFile( const fileName = normalizeOutputWorkspaceFileName(outputPath) const format = resolveOutputFormat(fileName, explicitFormat) span.setAttributes({ - 'copilot.output_file.name': fileName, - 'copilot.output_file.format': format, + [TraceAttr.CopilotOutputFileName]: fileName, + [TraceAttr.CopilotOutputFileFormat]: format, }) if (context.abortSignal?.aborted) { throw new Error('Request aborted before tool mutation could be applied') @@ -172,7 +173,7 @@ export async function maybeWriteOutputToFile( const contentType = FORMAT_TO_CONTENT_TYPE[format] const buffer = Buffer.from(content, 'utf-8') - span.setAttribute('copilot.output_file.bytes', buffer.length) + span.setAttribute(TraceAttr.CopilotOutputFileBytes, buffer.length) if (context.abortSignal?.aborted) { throw new Error('Request aborted before tool mutation could be applied') } @@ -184,8 +185,8 @@ export async function maybeWriteOutputToFile( contentType ) span.setAttributes({ - 'copilot.output_file.id': uploaded.id, - 'copilot.output_file.outcome': 'uploaded', + [TraceAttr.CopilotOutputFileId]: uploaded.id, + [TraceAttr.CopilotOutputFileOutcome]: 'uploaded', }) logger.info('Tool output written to file', { @@ -213,7 +214,7 @@ export async function maybeWriteOutputToFile( outputPath, error: message, }) - span.setAttribute('copilot.output_file.outcome', 'failed') + span.setAttribute(TraceAttr.CopilotOutputFileOutcome, 'failed') span.addEvent('copilot.output_file.error', { 'error.message': message.slice(0, 500), }) diff --git a/apps/sim/lib/copilot/request/tools/resources.ts b/apps/sim/lib/copilot/request/tools/resources.ts index 84948df08ed..bb757350769 100644 --- a/apps/sim/lib/copilot/request/tools/resources.ts +++ b/apps/sim/lib/copilot/request/tools/resources.ts @@ -3,6 +3,7 @@ import { MothershipStreamV1EventType, 
MothershipStreamV1ResourceOp, } from '@/lib/copilot/generated/mothership-stream-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { StreamEvent, ToolCallResult } from '@/lib/copilot/request/types' @@ -118,10 +119,14 @@ export async function handleResourceSideEffects( } span.setAttributes({ - 'copilot.resources.op': isDeleteOp ? 'delete' : upsertedCount > 0 ? 'upsert' : 'none', - 'copilot.resources.removed_count': removedCount, - 'copilot.resources.upserted_count': upsertedCount, - 'copilot.resources.aborted': isAborted(), + [TraceAttr.CopilotResourcesOp]: isDeleteOp + ? 'delete' + : upsertedCount > 0 + ? 'upsert' + : 'none', + [TraceAttr.CopilotResourcesRemovedCount]: removedCount, + [TraceAttr.CopilotResourcesUpsertedCount]: upsertedCount, + [TraceAttr.CopilotResourcesAborted]: isAborted(), }) } ) diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index a7df1e9ed99..a012a4ddda8 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger' import { parse as csvParse } from 'csv-parse/sync' import { eq } from 'drizzle-orm' import { FunctionExecute, Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' @@ -38,7 +39,7 @@ export async function maybeWriteOutputToTable( try { const table = await getTableById(outputTable) if (!table) { - span.setAttribute('copilot.table.outcome', 'table_not_found') + span.setAttribute(TraceAttr.CopilotTableOutcome, 
'table_not_found') return { success: false, error: `Table "${outputTable}" not found`, @@ -53,7 +54,7 @@ export async function maybeWriteOutputToTable( if (Array.isArray(inner)) { rows = inner } else { - span.setAttribute('copilot.table.outcome', 'invalid_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_shape') return { success: false, error: 'outputTable requires the code to return an array of objects', @@ -62,17 +63,17 @@ export async function maybeWriteOutputToTable( } else if (Array.isArray(rawOutput)) { rows = rawOutput } else { - span.setAttribute('copilot.table.outcome', 'invalid_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_shape') return { success: false, error: 'outputTable requires the code to return an array of objects', } } - span.setAttribute('copilot.table.row_count', rows.length) + span.setAttribute(TraceAttr.CopilotTableRowCount, rows.length) if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - span.setAttribute('copilot.table.outcome', 'row_limit_exceeded') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'row_limit_exceeded') return { success: false, error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, @@ -80,7 +81,7 @@ export async function maybeWriteOutputToTable( } if (rows.length === 0) { - span.setAttribute('copilot.table.outcome', 'empty_rows') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_rows') return { success: false, error: 'outputTable requires at least one row — code returned an empty array', @@ -121,7 +122,7 @@ export async function maybeWriteOutputToTable( tableId: outputTable, rowCount: rows.length, }) - span.setAttribute('copilot.table.outcome', 'wrote') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'wrote') return { success: true, output: { @@ -136,7 +137,7 @@ export async function maybeWriteOutputToTable( outputTable, error: err instanceof Error ? 
err.message : String(err), }) - span.setAttribute('copilot.table.outcome', 'failed') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'failed') span.addEvent('copilot.table.error', { 'error.message': (err instanceof Error ? err.message : String(err)).slice(0, 500), }) @@ -173,23 +174,23 @@ export async function maybeWriteReadCsvToTable( try { const table = await getTableById(outputTable) if (!table) { - span.setAttribute('copilot.table.outcome', 'table_not_found') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'table_not_found') return { success: false, error: `Table "${outputTable}" not found` } } const output = result.output as Record const content = (output.content as string) || '' if (!content.trim()) { - span.setAttribute('copilot.table.outcome', 'empty_content') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_content') return { success: false, error: 'File has no content to import into table' } } const filePath = (params?.path as string) || '' const ext = filePath.split('.').pop()?.toLowerCase() span.setAttributes({ - 'copilot.table.source.path': filePath, - 'copilot.table.source.format': ext === 'json' ? 'json' : 'csv', - 'copilot.table.source.content_bytes': content.length, + [TraceAttr.CopilotTableSourcePath]: filePath, + [TraceAttr.CopilotTableSourceFormat]: ext === 'json' ? 
'json' : 'csv', + [TraceAttr.CopilotTableSourceContentBytes]: content.length, }) let rows: Record[] @@ -197,7 +198,7 @@ export async function maybeWriteReadCsvToTable( if (ext === 'json') { const parsed = JSON.parse(content) if (!Array.isArray(parsed)) { - span.setAttribute('copilot.table.outcome', 'invalid_json_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_json_shape') return { success: false, error: 'JSON file must contain an array of objects for table import', @@ -216,15 +217,15 @@ export async function maybeWriteReadCsvToTable( }) as Record[] } - span.setAttribute('copilot.table.row_count', rows.length) + span.setAttribute(TraceAttr.CopilotTableRowCount, rows.length) if (rows.length === 0) { - span.setAttribute('copilot.table.outcome', 'empty_rows') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_rows') return { success: false, error: 'File has no data rows to import' } } if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - span.setAttribute('copilot.table.outcome', 'row_limit_exceeded') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'row_limit_exceeded') return { success: false, error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, @@ -267,7 +268,7 @@ export async function maybeWriteReadCsvToTable( rowCount: rows.length, filePath, }) - span.setAttribute('copilot.table.outcome', 'imported') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'imported') return { success: true, output: { @@ -283,7 +284,7 @@ export async function maybeWriteReadCsvToTable( outputTable, error: err instanceof Error ? err.message : String(err), }) - span.setAttribute('copilot.table.outcome', 'failed') + span.setAttribute(TraceAttr.CopilotTableOutcome, 'failed') span.addEvent('copilot.table.error', { 'error.message': (err instanceof Error ? 
err.message : String(err)).slice(0, 500), }) diff --git a/apps/sim/lib/copilot/request/trace.ts b/apps/sim/lib/copilot/request/trace.ts index 0e4d686942f..f73fca3f75d 100644 --- a/apps/sim/lib/copilot/request/trace.ts +++ b/apps/sim/lib/copilot/request/trace.ts @@ -10,6 +10,7 @@ import { RequestTraceV1SpanStatus, type RequestTraceV1UsageSummary, } from '@/lib/copilot/generated/request-trace-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { env } from '@/lib/core/config/env' const logger = createLogger('RequestTrace') @@ -126,8 +127,8 @@ export async function reportTrace( operation: 'report_trace', attributes: { 'copilot.request.id': trace.simRequestId ?? '', - 'http.request.content_length': body.length, - 'copilot.trace.span_count': trace.spans?.length ?? 0, + [TraceAttr.HttpRequestContentLength]: body.length, + [TraceAttr.CopilotTraceSpanCount]: trace.spans?.length ?? 0, }, }) diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index eea76a44e71..9550ee79771 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -1,5 +1,6 @@ import { type Span, SpanStatusCode, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { downloadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' @@ -86,14 +87,14 @@ async function prepareImageForVision( TraceSpan.CopilotVfsPrepareImage, { attributes: { - 'copilot.vfs.input.bytes': buffer.length, - 'copilot.vfs.input.media_type_claimed': claimedType, + [TraceAttr.CopilotVfsInputBytes]: buffer.length, + [TraceAttr.CopilotVfsInputMediaTypeClaimed]: claimedType, }, }, async (span) => { try { const mediaType = 
detectImageMime(buffer, claimedType) - span.setAttribute('copilot.vfs.input.media_type_detected', mediaType) + span.setAttribute(TraceAttr.CopilotVfsInputMediaTypeDetected, mediaType) let sharpModule: typeof import('sharp').default try { @@ -103,10 +104,10 @@ async function prepareImageForVision( mediaType, error: err instanceof Error ? err.message : String(err), }) - span.setAttribute('copilot.vfs.sharp.load_failed', true) + span.setAttribute(TraceAttr.CopilotVfsSharpLoadFailed, true) const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES span.setAttribute( - 'copilot.vfs.outcome', + TraceAttr.CopilotVfsOutcome, fitsWithoutSharp ? 'passthrough_no_sharp' : 'rejected_no_sharp' ) return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null @@ -120,10 +121,10 @@ async function prepareImageForVision( mediaType, error: err instanceof Error ? err.message : String(err), }) - span.setAttribute('copilot.vfs.metadata.failed', true) + span.setAttribute(TraceAttr.CopilotVfsMetadataFailed, true) const fitsWithoutSharp = buffer.length <= MAX_IMAGE_READ_BYTES span.setAttribute( - 'copilot.vfs.outcome', + TraceAttr.CopilotVfsOutcome, fitsWithoutSharp ? 'passthrough_no_metadata' : 'rejected_no_metadata' ) return fitsWithoutSharp ? { buffer, mediaType, resized: false } : null @@ -132,8 +133,8 @@ async function prepareImageForVision( const width = metadata.width ?? 0 const height = metadata.height ?? 
0 span.setAttributes({ - 'copilot.vfs.input.width': width, - 'copilot.vfs.input.height': height, + [TraceAttr.CopilotVfsInputWidth]: width, + [TraceAttr.CopilotVfsInputHeight]: height, }) const needsResize = @@ -142,10 +143,10 @@ async function prepareImageForVision( height > MAX_IMAGE_DIMENSION if (!needsResize) { span.setAttributes({ - 'copilot.vfs.resized': false, - 'copilot.vfs.outcome': 'passthrough_fits_budget', - 'copilot.vfs.output.bytes': buffer.length, - 'copilot.vfs.output.media_type': mediaType, + [TraceAttr.CopilotVfsResized]: false, + [TraceAttr.CopilotVfsOutcome]: 'passthrough_fits_budget', + [TraceAttr.CopilotVfsOutputBytes]: buffer.length, + [TraceAttr.CopilotVfsOutputMediaType]: mediaType, }) return { buffer, mediaType, resized: false } } @@ -156,7 +157,7 @@ async function prepareImageForVision( mediaType === 'image/webp' || mediaType === 'image/gif' ) - span.setAttribute('copilot.vfs.has_alpha', hasAlpha) + span.setAttribute(TraceAttr.CopilotVfsHasAlpha, hasAlpha) let attempts = 0 for (const dimension of IMAGE_RESIZE_DIMENSIONS) { @@ -203,13 +204,13 @@ async function prepareImageForVision( outputMediaType: transformed.mediaType, }) span.setAttributes({ - 'copilot.vfs.resized': true, - 'copilot.vfs.resize.attempts': attempts, - 'copilot.vfs.resize.chosen_dimension': dimension, - 'copilot.vfs.resize.chosen_quality': quality, - 'copilot.vfs.output.bytes': transformed.buffer.length, - 'copilot.vfs.output.media_type': transformed.mediaType, - 'copilot.vfs.outcome': 'resized', + [TraceAttr.CopilotVfsResized]: true, + [TraceAttr.CopilotVfsResizeAttempts]: attempts, + [TraceAttr.CopilotVfsResizeChosenDimension]: dimension, + [TraceAttr.CopilotVfsResizeChosenQuality]: quality, + [TraceAttr.CopilotVfsOutputBytes]: transformed.buffer.length, + [TraceAttr.CopilotVfsOutputMediaType]: transformed.mediaType, + [TraceAttr.CopilotVfsOutcome]: 'resized', }) return { buffer: transformed.buffer, @@ -234,9 +235,9 @@ async function prepareImageForVision( } 
span.setAttributes({ - 'copilot.vfs.resized': false, - 'copilot.vfs.resize.attempts': attempts, - 'copilot.vfs.outcome': 'rejected_too_large_after_resize', + [TraceAttr.CopilotVfsResized]: false, + [TraceAttr.CopilotVfsResizeAttempts]: attempts, + [TraceAttr.CopilotVfsOutcome]: 'rejected_too_large_after_resize', }) return null } catch (err) { @@ -277,20 +278,20 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise { try { if (isImageFileType(record.type)) { - span.setAttribute('copilot.vfs.read.path', 'image') + span.setAttribute(TraceAttr.CopilotVfsReadPath, 'image') const originalBuffer = await downloadWorkspaceFile(record) const prepared = await prepareImageForVision(originalBuffer, record.type) if (!prepared) { - span.setAttribute('copilot.vfs.read.outcome', 'image_too_large') + span.setAttribute(TraceAttr.CopilotVfsReadOutcome, 'image_too_large') return { content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB after resize/compression)]`, totalLines: 1, @@ -299,10 +300,10 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise MAX_TEXT_READ_BYTES) { - span.setAttribute('copilot.vfs.read.outcome', 'text_too_large') + span.setAttribute(TraceAttr.CopilotVfsReadOutcome, 'text_too_large') return { content: `[File too large to display inline: ${record.name} (${record.size} bytes, limit ${MAX_TEXT_READ_BYTES})]`, totalLines: 1, @@ -332,16 +333,16 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise( blockMapping.spanName, { attributes: { - 'block.type': blockType, - 'block.id': blockId, - 'block.name': blockName, + [TraceAttr.BlockType]: blockType, + [TraceAttr.BlockId]: blockId, + [TraceAttr.BlockName]: blockName, }, }, async (span) => { @@ -439,8 +440,8 @@ export function trackPlatformEvent( const span = tracer.startSpan(eventName, { attributes: { ...attributes, - 'event.name': eventName, - 'event.timestamp': Date.now(), + [TraceAttr.EventName]: 
eventName, + [TraceAttr.EventTimestamp]: Date.now(), }, }) span.setStatus({ code: SpanStatusCode.OK }) diff --git a/package.json b/package.json index fc3439f62a6..4ff2d078209 100644 --- a/package.json +++ b/package.json @@ -29,8 +29,10 @@ "trace-contracts:check": "bun run scripts/sync-request-trace-contract.ts --check", "trace-spans-contract:generate": "bun run scripts/sync-trace-spans-contract.ts", "trace-spans-contract:check": "bun run scripts/sync-trace-spans-contract.ts --check", - "mship:generate": "bun run mship-contracts:generate && bun run mship-tools:generate && bun run trace-contracts:generate && bun run trace-spans-contract:generate", - "mship:check": "bun run mship-contracts:check && bun run mship-tools:check && bun run trace-contracts:check && bun run trace-spans-contract:check", + "trace-attributes-contract:generate": "bun run scripts/sync-trace-attributes-contract.ts", + "trace-attributes-contract:check": "bun run scripts/sync-trace-attributes-contract.ts --check", + "mship:generate": "bun run mship-contracts:generate && bun run mship-tools:generate && bun run trace-contracts:generate && bun run trace-spans-contract:generate && bun run trace-attributes-contract:generate", + "mship:check": "bun run mship-contracts:check && bun run mship-tools:check && bun run trace-contracts:check && bun run trace-spans-contract:check && bun run trace-attributes-contract:check", "prepare": "bun husky", "type-check": "turbo run type-check", "release": "bun run scripts/create-single-release.ts" diff --git a/scripts/sync-trace-attributes-contract.ts b/scripts/sync-trace-attributes-contract.ts new file mode 100644 index 00000000000..3f693781cd3 --- /dev/null +++ b/scripts/sync-trace-attributes-contract.ts @@ -0,0 +1,168 @@ +import { mkdir, readFile, writeFile } from 'node:fs/promises' +import { dirname, resolve } from 'node:path' +import { fileURLToPath } from 'node:url' + +/** + * Generate `apps/sim/lib/copilot/generated/trace-attributes-v1.ts` + * from the Go-side 
`contracts/trace-attributes-v1.schema.json` + * contract. + * + * The contract is a single-enum JSON Schema listing every CUSTOM + * (non-OTel-semconv) span attribute key used in mothership. We emit: + * - A `TraceAttr` const object keyed by PascalCase identifier whose + * values are the exact wire strings, so call sites look like + * `span.setAttribute(TraceAttr.ChatId, …)` instead of the raw + * `span.setAttribute('chat.id', …)`. + * - A `TraceAttrKey` union and a `TraceAttrValue` union type so + * helpers that take an attribute key are well-typed. + * - A sorted `TraceAttrValues` readonly array for tests/enumeration. + * + * This is the attribute-key twin of `sync-trace-spans-contract.ts` + * (span names). The two files share the enum-extraction + identifier + * PascalCase + collision-detection pattern so a reader who understands + * one understands both. + * + * For OTel semantic-convention keys (e.g. `http.request.method`, + * `db.system`, `gen_ai.system`, `messaging.*`, `net.*`, + * `service.name`, `deployment.environment`), import from + * `@opentelemetry/semantic-conventions` directly — they live in the + * upstream package, not in this contract. + */ +const SCRIPT_DIR = dirname(fileURLToPath(import.meta.url)) +const ROOT = resolve(SCRIPT_DIR, '..') +const DEFAULT_CONTRACT_PATH = resolve( + ROOT, + '../copilot/copilot/contracts/trace-attributes-v1.schema.json', +) +const OUTPUT_PATH = resolve( + ROOT, + 'apps/sim/lib/copilot/generated/trace-attributes-v1.ts', +) + +function extractAttrKeys(schema: Record): string[] { + const defs = (schema.$defs ?? 
{}) as Record + const nameDef = defs.TraceAttributesV1Name + if ( + !nameDef || + typeof nameDef !== 'object' || + !Array.isArray((nameDef as Record).enum) + ) { + throw new Error( + 'trace-attributes-v1.schema.json is missing $defs.TraceAttributesV1Name.enum', + ) + } + const enumValues = (nameDef as Record).enum as unknown[] + if (!enumValues.every((v) => typeof v === 'string')) { + throw new Error('TraceAttributesV1Name enum must be string-only') + } + return (enumValues as string[]).slice().sort() +} + +/** + * Convert a wire attribute key like `copilot.vfs.input.media_type_claimed` + * into an identifier-safe PascalCase key like + * `CopilotVfsInputMediaTypeClaimed`. + * + * Same algorithm as the span-name sync script so readers can learn one + * and reuse it. + */ +function toIdentifier(name: string): string { + const parts = name.split(/[^A-Za-z0-9]+/).filter(Boolean) + if (parts.length === 0) { + throw new Error(`Cannot derive identifier for attribute key: ${name}`) + } + const ident = parts + .map((p) => p.charAt(0).toUpperCase() + p.slice(1).toLowerCase()) + .join('') + if (/^[0-9]/.test(ident)) { + throw new Error( + `Derived identifier "${ident}" for attribute "${name}" starts with a digit`, + ) + } + return ident +} + +function render(attrKeys: string[]): string { + const pairs = attrKeys.map((name) => ({ name, ident: toIdentifier(name) })) + + // Identifier collisions silently override earlier keys and break + // type safety — fail loudly instead. + const seen = new Map() + for (const p of pairs) { + const prev = seen.get(p.ident) + if (prev && prev !== p.name) { + throw new Error( + `Identifier collision: "${prev}" and "${p.name}" both map to "${p.ident}"`, + ) + } + seen.set(p.ident, p.name) + } + + const constLines = pairs + .map((p) => ` ${p.ident}: ${JSON.stringify(p.name)},`) + .join('\n') + const arrayEntries = attrKeys.map((n) => ` ${JSON.stringify(n)},`).join('\n') + + return `// AUTO-GENERATED FILE. DO NOT EDIT. 
+// +// Source: copilot/copilot/contracts/trace-attributes-v1.schema.json +// Regenerate with: bun run trace-attributes-contract:generate +// +// Canonical custom mothership OTel span attribute keys. Call sites +// should reference \`TraceAttr.\` (e.g. +// \`TraceAttr.ChatId\`, \`TraceAttr.ToolCallId\`) rather than raw +// string literals, so the Go-side contract is the single source of +// truth and typos become compile errors. +// +// For OTel semantic-convention keys (\`http.*\`, \`db.*\`, +// \`gen_ai.*\`, \`net.*\`, \`messaging.*\`, \`service.*\`, +// \`deployment.environment\`), import from +// \`@opentelemetry/semantic-conventions\` directly — those are owned +// by the upstream OTel spec, not by this contract. + +export const TraceAttr = { +${constLines} +} as const; + +export type TraceAttrKey = keyof typeof TraceAttr; +export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey]; + +/** Readonly sorted list of every canonical custom attribute key. */ +export const TraceAttrValues: readonly TraceAttrValue[] = [ +${arrayEntries} +] as const; +` +} + +async function main() { + const checkOnly = process.argv.includes('--check') + const inputArg = process.argv.find((a) => a.startsWith('--input=')) + const inputPath = inputArg + ? resolve(ROOT, inputArg.slice('--input='.length)) + : DEFAULT_CONTRACT_PATH + + const raw = await readFile(inputPath, 'utf8') + const schema = JSON.parse(raw) + const attrKeys = extractAttrKeys(schema) + const rendered = render(attrKeys) + + if (checkOnly) { + const existing = await readFile(OUTPUT_PATH, 'utf8').catch(() => null) + if (existing !== rendered) { + throw new Error( + 'Generated trace attributes contract is stale. 
Run: bun run trace-attributes-contract:generate', + ) + } + console.log('Trace attributes contract is up to date.') + return + } + + await mkdir(dirname(OUTPUT_PATH), { recursive: true }) + await writeFile(OUTPUT_PATH, rendered, 'utf8') + console.log(`Generated trace attributes types -> ${OUTPUT_PATH}`) +} + +main().catch((err) => { + console.error(err) + process.exit(1) +}) From 9b4ca6fb96a5ddf6830011dfd7d611b55a34c71f Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 17 Apr 2026 19:23:27 -0700 Subject: [PATCH 07/10] Otel fixes --- apps/sim/app/api/billing/update-cost/route.ts | 13 +- .../api/copilot/api-keys/validate/route.ts | 13 +- apps/sim/app/api/copilot/chat/abort/route.ts | 11 +- apps/sim/app/api/copilot/chat/stop/route.ts | 14 +- apps/sim/app/api/copilot/chat/stream/route.ts | 47 +- apps/sim/app/api/copilot/confirm/route.ts | 17 +- .../[workspaceId]/home/hooks/use-chat.ts | 57 +- apps/sim/lib/copilot/chat/post.ts | 17 +- apps/sim/lib/copilot/chat/terminal-state.ts | 3 +- .../generated/mothership-stream-v1-schema.ts | 3006 +++++----- .../copilot/generated/mothership-stream-v1.ts | 746 ++- .../lib/copilot/generated/request-trace-v1.ts | 141 +- .../lib/copilot/generated/tool-catalog-v1.ts | 3660 +++--------- .../lib/copilot/generated/tool-schemas-v1.ts | 4972 +++++++++-------- .../generated/trace-attribute-values-v1.ts | 350 ++ .../copilot/generated/trace-attributes-v1.ts | 1902 +++---- .../lib/copilot/generated/trace-events-v1.ts | 44 + .../lib/copilot/generated/trace-spans-v1.ts | 266 +- apps/sim/lib/copilot/request/go/fetch.ts | 3 +- .../lib/copilot/request/lifecycle/start.ts | 97 +- apps/sim/lib/copilot/request/otel.ts | 12 +- apps/sim/lib/copilot/request/session/abort.ts | 84 +- .../copilot/request/session/explicit-abort.ts | 6 +- apps/sim/lib/copilot/request/session/index.ts | 3 + .../lib/copilot/request/session/recovery.ts | 5 +- apps/sim/lib/copilot/request/tools/files.ts | 5 +- apps/sim/lib/copilot/request/tools/tables.ts | 30 +- 
.../tools/client/run-tool-execution.ts | 8 +- .../lib/copilot/tools/client/trace-context.ts | 58 + apps/sim/lib/copilot/vfs/file-reader.ts | 32 +- package.json | 8 +- .../sync-trace-attribute-values-contract.ts | 155 + scripts/sync-trace-events-contract.ts | 137 + 33 files changed, 7657 insertions(+), 8265 deletions(-) create mode 100644 apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts create mode 100644 apps/sim/lib/copilot/generated/trace-events-v1.ts create mode 100644 apps/sim/lib/copilot/tools/client/trace-context.ts create mode 100644 scripts/sync-trace-attribute-values-contract.ts create mode 100644 scripts/sync-trace-events-contract.ts diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 4799b485221..222c9ed5375 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -11,6 +11,7 @@ import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { isBillingEnabled } from '@/lib/core/config/feature-flags' import { type AtomicClaimResult, billingIdempotency } from '@/lib/core/idempotency/service' import { generateRequestId } from '@/lib/core/utils/request' +import { BillingRouteOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('BillingUpdateCostAPI') @@ -60,7 +61,7 @@ async function updateCostInner( logger.info(`[${requestId}] Update cost request started`) if (!isBillingEnabled) { - span.setAttribute(TraceAttr.BillingOutcome, 'billing_disabled') + span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.BillingDisabled) span.setAttribute(TraceAttr.HttpStatusCode, 200) return NextResponse.json({ success: true, @@ -77,7 +78,7 @@ async function updateCostInner( const authResult = checkInternalApiKey(req) if (!authResult.success) { logger.warn(`[${requestId}] Authentication failed: ${authResult.error}`) - span.setAttribute(TraceAttr.BillingOutcome, 'auth_failed') + 
span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.AuthFailed) span.setAttribute(TraceAttr.HttpStatusCode, 401) return NextResponse.json( { @@ -96,7 +97,7 @@ async function updateCostInner( errors: validation.error.issues, body, }) - span.setAttribute(TraceAttr.BillingOutcome, 'invalid_body') + span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.InvalidBody) span.setAttribute(TraceAttr.HttpStatusCode, 400) return NextResponse.json( { @@ -133,7 +134,7 @@ async function updateCostInner( userId, source, }) - span.setAttribute(TraceAttr.BillingOutcome, 'duplicate_idempotency_key') + span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.DuplicateIdempotencyKey) span.setAttribute(TraceAttr.HttpStatusCode, 409) return NextResponse.json( { @@ -199,7 +200,7 @@ async function updateCostInner( cost, }) - span.setAttribute(TraceAttr.BillingOutcome, 'billed') + span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.Billed) span.setAttribute(TraceAttr.HttpStatusCode, 200) span.setAttribute(TraceAttr.BillingDurationMs, duration) return NextResponse.json({ @@ -236,7 +237,7 @@ async function updateCostInner( ) } - span.setAttribute(TraceAttr.BillingOutcome, 'internal_error') + span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.InternalError) span.setAttribute(TraceAttr.HttpStatusCode, 500) span.setAttribute(TraceAttr.BillingDurationMs, duration) return NextResponse.json( diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index 6852881c570..85aad942c4b 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -9,6 +9,7 @@ import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' +import 
{ CopilotValidateOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotApiKeysValidate') @@ -32,7 +33,7 @@ export async function POST(req: NextRequest) { try { const auth = checkInternalApiKey(req) if (!auth.success) { - span.setAttribute(TraceAttr.CopilotValidateOutcome, 'internal_auth_failed') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.InternalAuthFailed) span.setAttribute(TraceAttr.HttpStatusCode, 401) return new NextResponse(null, { status: 401 }) } @@ -41,7 +42,7 @@ export async function POST(req: NextRequest) { const validationResult = ValidateApiKeySchema.safeParse(body) if (!validationResult.success) { logger.warn('Invalid validation request', { errors: validationResult.error.errors }) - span.setAttribute(TraceAttr.CopilotValidateOutcome, 'invalid_body') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.InvalidBody) span.setAttribute(TraceAttr.HttpStatusCode, 400) return NextResponse.json( { @@ -58,7 +59,7 @@ export async function POST(req: NextRequest) { const [existingUser] = await db.select().from(user).where(eq(user.id, userId)).limit(1) if (!existingUser) { logger.warn('[API VALIDATION] userId does not exist', { userId }) - span.setAttribute(TraceAttr.CopilotValidateOutcome, 'user_not_found') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.UserNotFound) span.setAttribute(TraceAttr.HttpStatusCode, 403) return NextResponse.json({ error: 'User not found' }, { status: 403 }) } @@ -80,17 +81,17 @@ export async function POST(req: NextRequest) { if (isExceeded) { logger.info('[API VALIDATION] Usage exceeded', { userId, currentUsage, limit }) - span.setAttribute(TraceAttr.CopilotValidateOutcome, 'usage_exceeded') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.UsageExceeded) span.setAttribute(TraceAttr.HttpStatusCode, 402) return new NextResponse(null, { status: 402 }) } - 
span.setAttribute(TraceAttr.CopilotValidateOutcome, 'ok') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.Ok) span.setAttribute(TraceAttr.HttpStatusCode, 200) return new NextResponse(null, { status: 200 }) } catch (error) { logger.error('Error validating usage limit', { error }) - span.setAttribute(TraceAttr.CopilotValidateOutcome, 'internal_error') + span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.InternalError) span.setAttribute(TraceAttr.HttpStatusCode, 500) return NextResponse.json({ error: 'Failed to validate usage' }, { status: 500 }) } diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts b/apps/sim/app/api/copilot/chat/abort/route.ts index 46c003c1a9e..7fd8e2493a3 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -9,6 +9,7 @@ import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/htt import { withCopilotSpan, withIncomingGoSpan } from '@/lib/copilot/request/otel' import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/request/session' import { env } from '@/lib/core/config/env' +import { CopilotAbortOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotChatAbortAPI') const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000 @@ -34,7 +35,7 @@ export async function POST(request: Request) { await authenticateCopilotRequestSessionOnly() if (!isAuthenticated || !authenticatedUserId) { - rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'unauthorized') + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, CopilotAbortOutcome.Unauthorized) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) } @@ -48,7 +49,7 @@ export async function POST(request: Request) { let chatId = typeof body.chatId === 'string' ? 
body.chatId : '' if (!streamId) { - rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'missing_stream_id') + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, CopilotAbortOutcome.MissingStreamId) return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) } rootSpan.setAttributes({ @@ -139,17 +140,17 @@ export async function POST(request: Request) { } ) if (!settled) { - rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'settle_timeout') + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, CopilotAbortOutcome.SettleTimeout) return NextResponse.json( { error: 'Previous response is still shutting down', aborted, settled: false }, { status: 409 } ) } - rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'settled') + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, CopilotAbortOutcome.Settled) return NextResponse.json({ aborted, settled: true }) } - rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, 'no_chat_id') + rootSpan.setAttribute(TraceAttr.CopilotAbortOutcome, CopilotAbortOutcome.NoChatId) return NextResponse.json({ aborted }) } ) diff --git a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index 71708838ff9..a0a0cb68570 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -11,6 +11,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { taskPubSub } from '@/lib/copilot/tasks' import { generateId } from '@/lib/core/utils/uuid' +import { CopilotStopOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotChatStopAPI') @@ -79,7 +80,7 @@ export async function POST(req: NextRequest) { try { const session = await getSession() if (!session?.user?.id) { - span.setAttribute(TraceAttr.CopilotStopOutcome, 'unauthorized') + span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.Unauthorized) 
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) } @@ -102,7 +103,7 @@ export async function POST(req: NextRequest) { .limit(1) if (!row) { - span.setAttribute(TraceAttr.CopilotStopOutcome, 'chat_not_found') + span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.ChatNotFound) return NextResponse.json({ success: true }) } @@ -160,15 +161,18 @@ export async function POST(req: NextRequest) { }) } - span.setAttribute(TraceAttr.CopilotStopOutcome, updated ? 'persisted' : 'no_matching_row') + span.setAttribute( + TraceAttr.CopilotStopOutcome, + updated ? CopilotStopOutcome.Persisted : CopilotStopOutcome.NoMatchingRow + ) return NextResponse.json({ success: true }) } catch (error) { if (error instanceof z.ZodError) { - span.setAttribute(TraceAttr.CopilotStopOutcome, 'validation_error') + span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.ValidationError) return NextResponse.json({ error: 'Invalid request' }, { status: 400 }) } logger.error('Error stopping chat stream:', error) - span.setAttribute(TraceAttr.CopilotStopOutcome, 'internal_error') + span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.InternalError) return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) } } diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index 47ae09984f0..bfeb2a5c3d9 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -7,6 +7,11 @@ import { MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { + CopilotResumeOutcome, + CopilotTransport, +} from '@/lib/copilot/generated/trace-attribute-values-v1' +import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' import { 
getCopilotTracer } from '@/lib/copilot/request/otel' import { @@ -125,15 +130,31 @@ export async function GET(request: NextRequest) { // manually, capture its context, and re-enter that context inside the // stream callback so every nested `withCopilotSpan` / `withDbSpan` call // attaches to this root. - const rootSpan = getCopilotTracer().startSpan('copilot.resume.request', { - attributes: { - [TraceAttr.CopilotTransport]: batchMode ? 'batch' : 'stream', - [TraceAttr.StreamId]: streamId, - [TraceAttr.UserId]: authenticatedUserId, - [TraceAttr.CopilotResumeAfterCursor]: afterCursor || '0', + // + // `contextFromRequestHeaders` extracts the W3C `traceparent` the + // client echoed (set via `streamTraceparentRef` on Sim's chat POST + // response), so the resume span becomes a child of the original + // chat's `gen_ai.agent.execute` trace instead of a disconnected + // new root. On reconnects after page reload (client ref was wiped) + // the header is absent and extraction leaves the ambient context + // alone → the resume span becomes its own root. Same as pre- + // linking behavior; no regression. + const incomingContext = contextFromRequestHeaders(request.headers) + const rootSpan = getCopilotTracer().startSpan( + 'copilot.resume.request', + { + attributes: { + [TraceAttr.CopilotTransport]: batchMode + ? 
CopilotTransport.Batch + : CopilotTransport.Stream, + [TraceAttr.StreamId]: streamId, + [TraceAttr.UserId]: authenticatedUserId, + [TraceAttr.CopilotResumeAfterCursor]: afterCursor || '0', + }, }, - }) - const rootContext = trace.setSpan(otelContext.active(), rootSpan) + incomingContext + ) + const rootContext = trace.setSpan(incomingContext, rootSpan) try { return await otelContext.with(rootContext, () => @@ -190,7 +211,7 @@ async function handleResumeRequestBody({ runStatus: run?.status, }) if (!run) { - rootSpan.setAttribute(TraceAttr.CopilotResumeOutcome, 'stream_not_found') + rootSpan.setAttribute(TraceAttr.CopilotResumeOutcome, CopilotResumeOutcome.StreamNotFound) rootSpan.end() return NextResponse.json({ error: 'Stream not found' }, { status: 404 }) } @@ -217,7 +238,7 @@ async function handleResumeRequestBody({ runStatus: run.status, }) rootSpan.setAttributes({ - [TraceAttr.CopilotResumeOutcome]: 'batch_delivered', + [TraceAttr.CopilotResumeOutcome]: CopilotResumeOutcome.BatchDelivered, [TraceAttr.CopilotResumeEventCount]: batchEvents.length, [TraceAttr.CopilotResumePreviewSessionCount]: previewSessions.length, }) @@ -411,10 +432,10 @@ async function handleResumeRequestBody({ closeController() rootSpan.setAttributes({ [TraceAttr.CopilotResumeOutcome]: sawTerminalEvent - ? 'terminal_delivered' + ? CopilotResumeOutcome.TerminalDelivered : controllerClosed - ? 'client_disconnected' - : 'ended_without_terminal', + ? 
CopilotResumeOutcome.ClientDisconnected + : CopilotResumeOutcome.EndedWithoutTerminal, [TraceAttr.CopilotResumeEventCount]: totalEventsFlushed, [TraceAttr.CopilotResumePollIterations]: pollIterations, [TraceAttr.CopilotResumeDurationMs]: Date.now() - startTime, diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index a8734d74d2b..b646ea7cc3b 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -25,6 +25,7 @@ import { createUnauthorizedResponse, } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' +import { CopilotConfirmOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotConfirmAPI') @@ -140,7 +141,7 @@ export async function POST(req: NextRequest) { await authenticateCopilotRequestSessionOnly() if (!isAuthenticated || !authenticatedUserId) { - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'unauthorized') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.Unauthorized) return createUnauthorizedResponse() } @@ -161,7 +162,7 @@ export async function POST(req: NextRequest) { }) if (!existing) { - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'tool_call_not_found') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.ToolCallNotFound) return createNotFoundResponse('Tool call not found') } if (existing.toolName) span.setAttribute(TraceAttr.ToolName, existing.toolName) @@ -175,11 +176,11 @@ export async function POST(req: NextRequest) { return null }) if (!run) { - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'run_not_found') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.RunNotFound) return createNotFoundResponse('Tool call run not found') } if (run.userId !== authenticatedUserId) { - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'forbidden') + 
span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.Forbidden) return NextResponse.json({ error: 'Forbidden' }, { status: 403 }) } @@ -193,13 +194,13 @@ export async function POST(req: NextRequest) { internalStatus: status, message, }) - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'update_failed') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.UpdateFailed) return createBadRequestResponse( 'Failed to update tool call status or tool call not found' ) } - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'delivered') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.Delivered) return NextResponse.json({ success: true, message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`, @@ -214,7 +215,7 @@ export async function POST(req: NextRequest) { duration, errors: error.errors, }) - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'validation_error') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.ValidationError) return createBadRequestResponse( `Invalid request data: ${error.errors.map((e) => e.message).join(', ')}` ) @@ -226,7 +227,7 @@ export async function POST(req: NextRequest) { stack: error instanceof Error ? error.stack : undefined, }) - span.setAttribute(TraceAttr.CopilotConfirmOutcome, 'internal_error') + span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.InternalError) return createInternalServerErrorResponse( error instanceof Error ? 
error.message : 'Internal server error' ) diff --git a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts index 8575e9a1b44..f1b411942eb 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts +++ b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts @@ -85,6 +85,7 @@ import { markRunToolManuallyStopped, reportManualRunToolStop, } from '@/lib/copilot/tools/client/run-tool-execution' +import { setCurrentChatTraceparent } from '@/lib/copilot/tools/client/trace-context' import { isWorkflowToolName } from '@/lib/copilot/tools/workflow-tools' import { generateId } from '@/lib/core/utils/uuid' import { getNextWorkflowColor } from '@/lib/workflows/colors' @@ -1271,6 +1272,13 @@ export function useChat( const activeTurnRef = useRef(null) const pendingUserMsgRef = useRef(null) const streamIdRef = useRef(undefined) + // W3C traceparent for the currently-streaming chat request. Sim's + // chat POST response returns this header built from the root + // gen_ai.agent.execute span; we echo it on every side-channel + // request (abort/stop/confirm/stream-replay) so they appear as + // child spans of the same trace instead of disconnected roots. + // Cleared when a new chat starts (overwritten by the next POST). + const streamTraceparentRef = useRef(undefined) const locallyTerminalStreamIdRef = useRef(undefined) const lastCursorRef = useRef('0') const sendingRef = useRef(false) @@ -2528,7 +2536,17 @@ export function useChat( ): Promise => { const response = await fetch( `/api/mothership/chat/stream?streamId=${encodeURIComponent(streamId)}&after=${encodeURIComponent(afterCursor)}&batch=true`, - { signal } + { + signal, + // Propagate the original chat trace so batch-replay spans + // nest under the same trace as the chat POST. Empty on + // page-reload reconnects (stored ref was wiped), in which + // case the resume handler starts its own root — unchanged + // from pre-linking behavior. 
+ ...(streamTraceparentRef.current + ? { headers: { traceparent: streamTraceparentRef.current } } + : {}), + } ) if (!response.ok) { throw new Error(`Stream resume batch failed: ${response.status}`) @@ -2599,7 +2617,12 @@ export function useChat( const sseRes = await fetch( `/api/mothership/chat/stream?streamId=${encodeURIComponent(streamId)}&after=${encodeURIComponent(latestCursor)}`, - { signal: activeAbort.signal } + { + signal: activeAbort.signal, + ...(streamTraceparentRef.current + ? { headers: { traceparent: streamTraceparentRef.current } } + : {}), + } ) if (!sseRes.ok || !sseRes.body) { throw new Error(RECONNECT_TAIL_ERROR) @@ -2878,7 +2901,12 @@ export function useChat( try { const res = await fetch(stopPathRef.current, { method: 'POST', - headers: { 'Content-Type': 'application/json' }, + headers: { + 'Content-Type': 'application/json', + ...(streamTraceparentRef.current + ? { traceparent: streamTraceparentRef.current } + : {}), + }, body: JSON.stringify({ chatId, streamId, @@ -3160,6 +3188,22 @@ export function useChat( signal: abortController.signal, }) + // Capture the server's root trace identity so we can propagate + // it on every subsequent side-channel call for this stream. + // See `streamTraceparentRef` comment above for full rationale. + // Fine to read even on non-ok responses — Sim still sets the + // header before validation fails so error traces are linked + // too; we just won't use it in that case because we return + // early below. Also mirror it into the module-level client- + // tool trace-context holder so tool-completion callbacks + // fired from non-React code paths (e.g. workflow runner) can + // echo it without having to thread a prop through. 
+ const traceparent = response.headers.get('traceparent') + if (traceparent) { + streamTraceparentRef.current = traceparent + setCurrentChatTraceparent(traceparent) + } + if (!response.ok) { const errorData = await response.json().catch(() => ({})) if (response.status === 409) { @@ -3460,7 +3504,12 @@ export function useChat( ? (async () => { const res = await fetch('/api/mothership/chat/abort', { method: 'POST', - headers: { 'Content-Type': 'application/json' }, + headers: { + 'Content-Type': 'application/json', + ...(streamTraceparentRef.current + ? { traceparent: streamTraceparentRef.current } + : {}), + }, body: JSON.stringify({ streamId: sid, ...(resolvedChatId ? { chatId: resolvedChatId } : {}), diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index d8aaacb7df3..c3889e158aa 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -925,7 +925,22 @@ export async function handleUnifiedChatPost(req: NextRequest) { }, }) - return new Response(stream, { headers: SSE_RESPONSE_HEADERS }) + // Expose the root gen_ai.agent.execute span's trace identity to + // the browser so subsequent HTTP calls (stop, abort, confirm, + // SSE reconnect) can echo it back as `traceparent` — making + // all side-channel work on this request appear as child spans + // of this same trace in Tempo instead of disconnected roots. + // W3C traceparent format: `00---`. + const rootCtx = otelRoot!.span.spanContext() + const rootTraceparent = `00-${rootCtx.traceId}-${rootCtx.spanId}-${ + (rootCtx.traceFlags & 0x1) === 0x1 ? 
'01' : '00' + }` + return new Response(stream, { + headers: { + ...SSE_RESPONSE_HEADERS, + traceparent: rootTraceparent, + }, + }) }) // end otelContextApi.with } catch (error) { if (chatStreamLockAcquired && actualChatId && userMessageId) { diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index 6bcb987192b..e620dc27957 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -5,6 +5,7 @@ import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' +import { CopilotChatFinalizeOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' interface FinalizeAssistantTurnParams { chatId: string @@ -65,7 +66,7 @@ export async function finalizeAssistantTurn({ messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`, }) .where(updateWhere) - span.setAttribute(TraceAttr.ChatFinalizeOutcome, 'appended_assistant') + span.setAttribute(TraceAttr.ChatFinalizeOutcome, CopilotChatFinalizeOutcome.AppendedAssistant) return } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts index 1c670b37b54..b702b8de3db 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts @@ -5,1421 +5,1849 @@ export type JsonSchema = unknown export const MOTHERSHIP_STREAM_V1_SCHEMA: JsonSchema = { - $defs: { - MothershipStreamV1AdditionalPropertiesMap: { - additionalProperties: true, - type: 'object', - }, - MothershipStreamV1AsyncToolRecordStatus: { - enum: ['pending', 'running', 'completed', 'failed', 'cancelled', 'delivered'], - type: 'string', - }, - 
MothershipStreamV1CheckpointPauseEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CheckpointPausePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "$defs": { + "MothershipStreamV1AdditionalPropertiesMap": { + "additionalProperties": true, + "type": "object" + }, + "MothershipStreamV1AsyncToolRecordStatus": { + "enum": [ + "pending", + "running", + "completed", + "failed", + "cancelled", + "delivered" + ], + "type": "string" + }, + "MothershipStreamV1CheckpointPauseEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CheckpointPausePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CheckpointPauseFrame: { - additionalProperties: false, - properties: { - parentToolCallId: { - type: 'string', - }, - parentToolName: { - type: 'string', - }, - pendingToolIds: { - items: { - type: 'string', + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CheckpointPauseFrame": { + "additionalProperties": false, + "properties": { + "parentToolCallId": { + "type": "string" + }, + "parentToolName": { + "type": "string" + }, + 
"pendingToolIds": { + "items": { + "type": "string" }, - type: 'array', - }, + "type": "array" + } }, - required: ['parentToolCallId', 'parentToolName', 'pendingToolIds'], - type: 'object', - }, - MothershipStreamV1CheckpointPausePayload: { - additionalProperties: false, - properties: { - checkpointId: { - type: 'string', - }, - executionId: { - type: 'string', - }, - frames: { - items: { - $ref: '#/$defs/MothershipStreamV1CheckpointPauseFrame', + "required": [ + "parentToolCallId", + "parentToolName", + "pendingToolIds" + ], + "type": "object" + }, + "MothershipStreamV1CheckpointPausePayload": { + "additionalProperties": false, + "properties": { + "checkpointId": { + "type": "string" + }, + "executionId": { + "type": "string" + }, + "frames": { + "items": { + "$ref": "#/$defs/MothershipStreamV1CheckpointPauseFrame" }, - type: 'array', + "type": "array" }, - kind: { - enum: ['checkpoint_pause'], - type: 'string', + "kind": { + "enum": [ + "checkpoint_pause" + ], + "type": "string" }, - pendingToolCallIds: { - items: { - type: 'string', + "pendingToolCallIds": { + "items": { + "type": "string" }, - type: 'array', - }, - runId: { - type: 'string', + "type": "array" }, + "runId": { + "type": "string" + } }, - required: ['kind', 'checkpointId', 'runId', 'executionId', 'pendingToolCallIds'], - type: 'object', - }, - MothershipStreamV1CompactionDoneData: { - additionalProperties: false, - properties: { - summary_chars: { - type: 'integer', - }, + "required": [ + "kind", + "checkpointId", + "runId", + "executionId", + "pendingToolCallIds" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDoneData": { + "additionalProperties": false, + "properties": { + "summary_chars": { + "type": "integer" + } }, - required: ['summary_chars'], - type: 'object', - }, - MothershipStreamV1CompactionDoneEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompactionDonePayload', - }, - scope: { - $ref: 
'#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "summary_chars" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDoneEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompactionDonePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompactionDonePayload: { - additionalProperties: false, - properties: { - data: { - $ref: '#/$defs/MothershipStreamV1CompactionDoneData', - }, - kind: { - enum: ['compaction_done'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompactionDonePayload": { + "additionalProperties": false, + "properties": { + "data": { + "$ref": "#/$defs/MothershipStreamV1CompactionDoneData" + }, + "kind": { + "enum": [ + "compaction_done" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1CompactionStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompactionStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - 
}, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1CompactionStartEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompactionStartPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompactionStartPayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['compaction_start'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompactionStartPayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "compaction_start" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1CompleteEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1CompletePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['complete'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1CompleteEventEnvelope": { + 
"additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1CompletePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "complete" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1CompletePayload: { - additionalProperties: false, - properties: { - cost: { - $ref: '#/$defs/MothershipStreamV1CostData', - }, - reason: { - type: 'string', - }, - response: true, - status: { - $ref: '#/$defs/MothershipStreamV1CompletionStatus', - }, - usage: { - $ref: '#/$defs/MothershipStreamV1UsageData', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1CompletePayload": { + "additionalProperties": false, + "properties": { + "cost": { + "$ref": "#/$defs/MothershipStreamV1CostData" + }, + "reason": { + "type": "string" + }, + "response": true, + "status": { + "$ref": "#/$defs/MothershipStreamV1CompletionStatus" + }, + "usage": { + "$ref": "#/$defs/MothershipStreamV1UsageData" + } }, - required: ['status'], - type: 'object', - }, - MothershipStreamV1CompletionStatus: { - enum: ['complete', 'error', 'cancelled'], - type: 'string', - }, - MothershipStreamV1CostData: { - additionalProperties: false, - properties: { - input: { - type: 'number', - }, - output: { - type: 'number', - }, - total: { - type: 'number', - }, + "required": [ + "status" + ], + "type": "object" + }, + "MothershipStreamV1CompletionStatus": { + "enum": [ + "complete", + "error", + "cancelled" + ], + "type": "string" + }, + "MothershipStreamV1CostData": { + "additionalProperties": false, + "properties": { + "input": 
{ + "type": "number" + }, + "output": { + "type": "number" + }, + "total": { + "type": "number" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1ErrorEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ErrorPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', + "MothershipStreamV1ErrorEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ErrorPayload" }, - seq: { - type: 'integer', + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', + "seq": { + "type": "integer" }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" }, - ts: { - type: 'string', + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" }, - type: { - enum: ['error'], - type: 'string', + "ts": { + "type": "string" }, - v: { - enum: [1], - type: 'integer', + "type": { + "enum": [ + "error" + ], + "type": "string" }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ErrorPayload: { - additionalProperties: false, - properties: { - code: { - type: 'string', - }, - data: true, - displayMessage: { - type: 'string', - }, - error: { - type: 'string', - }, - message: { - type: 'string', - }, - provider: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ErrorPayload": { + "additionalProperties": false, + "properties": { + "code": { + "type": "string" + }, + "data": true, + "displayMessage": { + "type": "string" + }, + "error": { + "type": "string" + }, + "message": { + "type": "string" + }, + "provider": { + "type": "string" + } }, - required: ['message'], - type: 'object', - }, - 
MothershipStreamV1EventEnvelopeCommon: { - additionalProperties: false, - properties: { - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "message" + ], + "type": "object" + }, + "MothershipStreamV1EventEnvelopeCommon": { + "additionalProperties": false, + "properties": { + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream'], - type: 'object', - }, - MothershipStreamV1EventType: { - enum: ['session', 'text', 'tool', 'span', 'resource', 'run', 'error', 'complete'], - type: 'string', - }, - MothershipStreamV1ResourceDescriptor: { - additionalProperties: false, - properties: { - id: { - type: 'string', - }, - title: { - type: 'string', - }, - type: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream" + ], + "type": "object" + }, + "MothershipStreamV1EventType": { + "enum": [ + "session", + "text", + "tool", + "span", + "resource", + "run", + "error", + "complete" + ], + "type": "string" + }, + "MothershipStreamV1ResourceDescriptor": { + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + } }, - required: ['type', 'id'], - type: 'object', - }, - MothershipStreamV1ResourceOp: { - enum: ['upsert', 'remove'], - type: 'string', - }, - MothershipStreamV1ResourceRemoveEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: 
'#/$defs/MothershipStreamV1ResourceRemovePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['resource'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "type", + "id" + ], + "type": "object" + }, + "MothershipStreamV1ResourceOp": { + "enum": [ + "upsert", + "remove" + ], + "type": "string" + }, + "MothershipStreamV1ResourceRemoveEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ResourceRemovePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "resource" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ResourceRemovePayload: { - additionalProperties: false, - properties: { - op: { - enum: ['remove'], - type: 'string', - }, - resource: { - $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ResourceRemovePayload": { + "additionalProperties": false, + "properties": { + "op": { + "enum": [ + "remove" + ], + "type": "string" + }, + "resource": { + "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" + } }, - required: ['op', 'resource'], - type: 'object', - }, - MothershipStreamV1ResourceUpsertEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: 
'#/$defs/MothershipStreamV1ResourceUpsertPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['resource'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "op", + "resource" + ], + "type": "object" + }, + "MothershipStreamV1ResourceUpsertEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ResourceUpsertPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "resource" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ResourceUpsertPayload: { - additionalProperties: false, - properties: { - op: { - enum: ['upsert'], - type: 'string', - }, - resource: { - $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ResourceUpsertPayload": { + "additionalProperties": false, + "properties": { + "op": { + "enum": [ + "upsert" + ], + "type": "string" + }, + "resource": { + "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" + } }, - required: ['op', 'resource'], - type: 'object', - }, - MothershipStreamV1ResumeRequest: { - additionalProperties: false, - properties: { - checkpointId: { - type: 'string', - }, - results: { - items: { - $ref: '#/$defs/MothershipStreamV1ResumeToolResult', + "required": [ + "op", + "resource" + ], + 
"type": "object" + }, + "MothershipStreamV1ResumeRequest": { + "additionalProperties": false, + "properties": { + "checkpointId": { + "type": "string" + }, + "results": { + "items": { + "$ref": "#/$defs/MothershipStreamV1ResumeToolResult" }, - type: 'array', - }, - streamId: { - type: 'string', + "type": "array" }, + "streamId": { + "type": "string" + } }, - required: ['streamId', 'checkpointId', 'results'], - type: 'object', - }, - MothershipStreamV1ResumeToolResult: { - additionalProperties: false, - properties: { - error: { - type: 'string', - }, - output: true, - success: { - type: 'boolean', - }, - toolCallId: { - type: 'string', - }, + "required": [ + "streamId", + "checkpointId", + "results" + ], + "type": "object" + }, + "MothershipStreamV1ResumeToolResult": { + "additionalProperties": false, + "properties": { + "error": { + "type": "string" + }, + "output": true, + "success": { + "type": "boolean" + }, + "toolCallId": { + "type": "string" + } }, - required: ['toolCallId', 'success'], - type: 'object', - }, - MothershipStreamV1RunKind: { - enum: ['checkpoint_pause', 'resumed', 'compaction_start', 'compaction_done'], - type: 'string', - }, - MothershipStreamV1RunResumedEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1RunResumedPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['run'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "toolCallId", + "success" + ], + "type": "object" + }, + "MothershipStreamV1RunKind": { + "enum": [ + "checkpoint_pause", + "resumed", + "compaction_start", + "compaction_done" + ], + "type": "string" + }, + "MothershipStreamV1RunResumedEventEnvelope": { + "additionalProperties": false, + "properties": 
{ + "payload": { + "$ref": "#/$defs/MothershipStreamV1RunResumedPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "run" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1RunResumedPayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['resumed'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1RunResumedPayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "resumed" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SessionChatEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionChatPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SessionChatEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionChatPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + 
"type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionChatPayload: { - additionalProperties: false, - properties: { - chatId: { - type: 'string', - }, - kind: { - enum: ['chat'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionChatPayload": { + "additionalProperties": false, + "properties": { + "chatId": { + "type": "string" + }, + "kind": { + "enum": [ + "chat" + ], + "type": "string" + } }, - required: ['kind', 'chatId'], - type: 'object', - }, - MothershipStreamV1SessionKind: { - enum: ['trace', 'chat', 'title', 'start'], - type: 'string', - }, - MothershipStreamV1SessionStartData: { - additionalProperties: false, - properties: { - responseId: { - type: 'string', - }, + "required": [ + "kind", + "chatId" + ], + "type": "object" + }, + "MothershipStreamV1SessionKind": { + "enum": [ + "trace", + "chat", + "title", + "start" + ], + "type": "string" + }, + "MothershipStreamV1SessionStartData": { + "additionalProperties": false, + "properties": { + "responseId": { + "type": "string" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1SessionStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', + "MothershipStreamV1SessionStartEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionStartPayload" }, - seq: { - type: 'integer', + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', + "seq": { + "type": "integer" }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', 
+ "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" }, - ts: { - type: 'string', + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" }, - type: { - enum: ['session'], - type: 'string', + "ts": { + "type": "string" }, - v: { - enum: [1], - type: 'integer', + "type": { + "enum": [ + "session" + ], + "type": "string" }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionStartPayload: { - additionalProperties: false, - properties: { - data: { - $ref: '#/$defs/MothershipStreamV1SessionStartData', - }, - kind: { - enum: ['start'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionStartPayload": { + "additionalProperties": false, + "properties": { + "data": { + "$ref": "#/$defs/MothershipStreamV1SessionStartData" + }, + "kind": { + "enum": [ + "start" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SessionTitleEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionTitlePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SessionTitleEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionTitlePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + 
"trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionTitlePayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['title'], - type: 'string', - }, - title: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionTitlePayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "title" + ], + "type": "string" + }, + "title": { + "type": "string" + } }, - required: ['kind', 'title'], - type: 'object', - }, - MothershipStreamV1SessionTraceEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SessionTracePayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['session'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "title" + ], + "type": "object" + }, + "MothershipStreamV1SessionTraceEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SessionTracePayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "session" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: 
['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SessionTracePayload: { - additionalProperties: false, - properties: { - kind: { - enum: ['trace'], - type: 'string', - }, - requestId: { - type: 'string', - }, - spanId: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SessionTracePayload": { + "additionalProperties": false, + "properties": { + "kind": { + "enum": [ + "trace" + ], + "type": "string" + }, + "requestId": { + "type": "string" + }, + "spanId": { + "type": "string" + } }, - required: ['kind', 'requestId'], - type: 'object', - }, - MothershipStreamV1SpanKind: { - enum: ['subagent'], - type: 'string', - }, - MothershipStreamV1SpanLifecycleEvent: { - enum: ['start', 'end'], - type: 'string', - }, - MothershipStreamV1SpanPayloadKind: { - enum: ['subagent', 'structured_result', 'subagent_result'], - type: 'string', - }, - MothershipStreamV1StreamCursor: { - additionalProperties: false, - properties: { - cursor: { - type: 'string', - }, - seq: { - type: 'integer', - }, - streamId: { - type: 'string', - }, + "required": [ + "kind", + "requestId" + ], + "type": "object" + }, + "MothershipStreamV1SpanKind": { + "enum": [ + "subagent" + ], + "type": "string" + }, + "MothershipStreamV1SpanLifecycleEvent": { + "enum": [ + "start", + "end" + ], + "type": "string" + }, + "MothershipStreamV1SpanPayloadKind": { + "enum": [ + "subagent", + "structured_result", + "subagent_result" + ], + "type": "string" + }, + "MothershipStreamV1StreamCursor": { + "additionalProperties": false, + "properties": { + "cursor": { + "type": "string" + }, + "seq": { + "type": "integer" + }, + "streamId": { + "type": "string" + } }, - required: ['streamId', 'cursor', 'seq'], - type: 'object', - }, - MothershipStreamV1StreamRef: { - additionalProperties: false, - properties: { - chatId: { - type: 'string', - }, - cursor: { - type: 'string', - }, - 
streamId: { - type: 'string', - }, + "required": [ + "streamId", + "cursor", + "seq" + ], + "type": "object" + }, + "MothershipStreamV1StreamRef": { + "additionalProperties": false, + "properties": { + "chatId": { + "type": "string" + }, + "cursor": { + "type": "string" + }, + "streamId": { + "type": "string" + } }, - required: ['streamId'], - type: 'object', - }, - MothershipStreamV1StreamScope: { - additionalProperties: false, - properties: { - agentId: { - type: 'string', - }, - lane: { - enum: ['subagent'], - type: 'string', - }, - parentToolCallId: { - type: 'string', - }, + "required": [ + "streamId" + ], + "type": "object" + }, + "MothershipStreamV1StreamScope": { + "additionalProperties": false, + "properties": { + "agentId": { + "type": "string" + }, + "lane": { + "enum": [ + "subagent" + ], + "type": "string" + }, + "parentToolCallId": { + "type": "string" + } }, - required: ['lane'], - type: 'object', - }, - MothershipStreamV1StructuredResultSpanEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1StructuredResultSpanPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "lane" + ], + "type": "object" + }, + "MothershipStreamV1StructuredResultSpanEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + 
"enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1StructuredResultSpanPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - kind: { - enum: ['structured_result'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1StructuredResultSpanPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "kind": { + "enum": [ + "structured_result" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SubagentResultSpanEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentResultSpanPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SubagentResultSpanEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 
'payload'], - type: 'object', - }, - MothershipStreamV1SubagentResultSpanPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - kind: { - enum: ['subagent_result'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentResultSpanPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "kind": { + "enum": [ + "subagent_result" + ], + "type": "string" + } }, - required: ['kind'], - type: 'object', - }, - MothershipStreamV1SubagentSpanEndEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentSpanEndPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanEndEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SubagentSpanEndPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - 
event: { - enum: ['end'], - type: 'string', - }, - kind: { - enum: ['subagent'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanEndPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "event": { + "enum": [ + "end" + ], + "type": "string" + }, + "kind": { + "enum": [ + "subagent" + ], + "type": "string" + } }, - required: ['kind', 'event'], - type: 'object', - }, - MothershipStreamV1SubagentSpanStartEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1SubagentSpanStartPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['span'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "event" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanStartEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "span" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1SubagentSpanStartPayload: { - additionalProperties: false, - properties: { - agent: { - type: 'string', - }, - data: true, - event: { - enum: ['start'], - type: 'string', - }, - kind: 
{ - enum: ['subagent'], - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1SubagentSpanStartPayload": { + "additionalProperties": false, + "properties": { + "agent": { + "type": "string" + }, + "data": true, + "event": { + "enum": [ + "start" + ], + "type": "string" + }, + "kind": { + "enum": [ + "subagent" + ], + "type": "string" + } }, - required: ['kind', 'event'], - type: 'object', - }, - MothershipStreamV1TextChannel: { - enum: ['assistant', 'thinking'], - type: 'string', - }, - MothershipStreamV1TextEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1TextPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['text'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "kind", + "event" + ], + "type": "object" + }, + "MothershipStreamV1TextChannel": { + "enum": [ + "assistant", + "thinking" + ], + "type": "string" + }, + "MothershipStreamV1TextEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1TextPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "text" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1TextPayload: { - additionalProperties: false, - properties: { - channel: { - $ref: 
'#/$defs/MothershipStreamV1TextChannel', - }, - text: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1TextPayload": { + "additionalProperties": false, + "properties": { + "channel": { + "$ref": "#/$defs/MothershipStreamV1TextChannel" + }, + "text": { + "type": "string" + } }, - required: ['channel', 'text'], - type: 'object', - }, - MothershipStreamV1ToolArgsDeltaEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "channel", + "text" + ], + "type": "object" + }, + "MothershipStreamV1ToolArgsDeltaEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolArgsDeltaPayload: { - additionalProperties: false, - properties: { - argumentsDelta: { - type: 'string', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - phase: { - enum: ['args_delta'], - type: 'string', - }, - 
toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolArgsDeltaPayload": { + "additionalProperties": false, + "properties": { + "argumentsDelta": { + "type": "string" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "phase": { + "enum": [ + "args_delta" + ], + "type": "string" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + } }, - required: ['toolCallId', 'toolName', 'argumentsDelta', 'executor', 'mode', 'phase'], - type: 'object', - }, - MothershipStreamV1ToolCallDescriptor: { - additionalProperties: false, - properties: { - arguments: { - $ref: '#/$defs/MothershipStreamV1AdditionalPropertiesMap', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - partial: { - type: 'boolean', - }, - phase: { - enum: ['call'], - type: 'string', - }, - requiresConfirmation: { - type: 'boolean', - }, - status: { - $ref: '#/$defs/MothershipStreamV1ToolStatus', - }, - toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, - ui: { - $ref: '#/$defs/MothershipStreamV1ToolUI', - }, + "required": [ + "toolCallId", + "toolName", + "argumentsDelta", + "executor", + "mode", + "phase" + ], + "type": "object" + }, + "MothershipStreamV1ToolCallDescriptor": { + "additionalProperties": false, + "properties": { + "arguments": { + "$ref": "#/$defs/MothershipStreamV1AdditionalPropertiesMap" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "partial": { + "type": "boolean" + }, + "phase": { + "enum": [ + "call" + ], + "type": "string" + }, + "requiresConfirmation": { + "type": "boolean" + }, + "status": { + "$ref": 
"#/$defs/MothershipStreamV1ToolStatus" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + }, + "ui": { + "$ref": "#/$defs/MothershipStreamV1ToolUI" + } }, - required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase'], - type: 'object', - }, - MothershipStreamV1ToolCallEventEnvelope: { - additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolCallDescriptor', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "toolCallId", + "toolName", + "executor", + "mode", + "phase" + ], + "type": "object" + }, + "MothershipStreamV1ToolCallEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ToolCallDescriptor" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolExecutor: { - enum: ['go', 'sim', 'client'], - type: 'string', - }, - MothershipStreamV1ToolMode: { - enum: ['sync', 'async'], - type: 'string', - }, - MothershipStreamV1ToolOutcome: { - enum: ['success', 'error', 'cancelled', 'skipped', 'rejected'], - type: 'string', - }, - MothershipStreamV1ToolPhase: { - enum: ['call', 'args_delta', 'result'], - type: 'string', - }, - MothershipStreamV1ToolResultEventEnvelope: { - 
additionalProperties: false, - properties: { - payload: { - $ref: '#/$defs/MothershipStreamV1ToolResultPayload', - }, - scope: { - $ref: '#/$defs/MothershipStreamV1StreamScope', - }, - seq: { - type: 'integer', - }, - stream: { - $ref: '#/$defs/MothershipStreamV1StreamRef', - }, - trace: { - $ref: '#/$defs/MothershipStreamV1Trace', - }, - ts: { - type: 'string', - }, - type: { - enum: ['tool'], - type: 'string', - }, - v: { - enum: [1], - type: 'integer', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolExecutor": { + "enum": [ + "go", + "sim", + "client" + ], + "type": "string" + }, + "MothershipStreamV1ToolMode": { + "enum": [ + "sync", + "async" + ], + "type": "string" + }, + "MothershipStreamV1ToolOutcome": { + "enum": [ + "success", + "error", + "cancelled", + "skipped", + "rejected" + ], + "type": "string" + }, + "MothershipStreamV1ToolPhase": { + "enum": [ + "call", + "args_delta", + "result" + ], + "type": "string" + }, + "MothershipStreamV1ToolResultEventEnvelope": { + "additionalProperties": false, + "properties": { + "payload": { + "$ref": "#/$defs/MothershipStreamV1ToolResultPayload" + }, + "scope": { + "$ref": "#/$defs/MothershipStreamV1StreamScope" + }, + "seq": { + "type": "integer" + }, + "stream": { + "$ref": "#/$defs/MothershipStreamV1StreamRef" + }, + "trace": { + "$ref": "#/$defs/MothershipStreamV1Trace" + }, + "ts": { + "type": "string" + }, + "type": { + "enum": [ + "tool" + ], + "type": "string" + }, + "v": { + "enum": [ + 1 + ], + "type": "integer" + } }, - required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], - type: 'object', - }, - MothershipStreamV1ToolResultPayload: { - additionalProperties: false, - properties: { - error: { - type: 'string', - }, - executor: { - $ref: '#/$defs/MothershipStreamV1ToolExecutor', - }, - mode: { - $ref: '#/$defs/MothershipStreamV1ToolMode', - }, - output: true, - phase: { - enum: ['result'], - type: 'string', - }, - 
status: { - $ref: '#/$defs/MothershipStreamV1ToolStatus', - }, - success: { - type: 'boolean', - }, - toolCallId: { - type: 'string', - }, - toolName: { - type: 'string', - }, + "required": [ + "v", + "seq", + "ts", + "stream", + "type", + "payload" + ], + "type": "object" + }, + "MothershipStreamV1ToolResultPayload": { + "additionalProperties": false, + "properties": { + "error": { + "type": "string" + }, + "executor": { + "$ref": "#/$defs/MothershipStreamV1ToolExecutor" + }, + "mode": { + "$ref": "#/$defs/MothershipStreamV1ToolMode" + }, + "output": true, + "phase": { + "enum": [ + "result" + ], + "type": "string" + }, + "status": { + "$ref": "#/$defs/MothershipStreamV1ToolStatus" + }, + "success": { + "type": "boolean" + }, + "toolCallId": { + "type": "string" + }, + "toolName": { + "type": "string" + } }, - required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase', 'success'], - type: 'object', - }, - MothershipStreamV1ToolStatus: { - enum: ['generating', 'executing', 'success', 'error', 'cancelled', 'skipped', 'rejected'], - type: 'string', - }, - MothershipStreamV1ToolUI: { - additionalProperties: false, - properties: { - clientExecutable: { - type: 'boolean', - }, - hidden: { - type: 'boolean', - }, - icon: { - type: 'string', - }, - internal: { - type: 'boolean', - }, - phaseLabel: { - type: 'string', - }, - requiresConfirmation: { - type: 'boolean', - }, - title: { - type: 'string', - }, + "required": [ + "toolCallId", + "toolName", + "executor", + "mode", + "phase", + "success" + ], + "type": "object" + }, + "MothershipStreamV1ToolStatus": { + "enum": [ + "generating", + "executing", + "success", + "error", + "cancelled", + "skipped", + "rejected" + ], + "type": "string" + }, + "MothershipStreamV1ToolUI": { + "additionalProperties": false, + "properties": { + "clientExecutable": { + "type": "boolean" + }, + "hidden": { + "type": "boolean" + }, + "icon": { + "type": "string" + }, + "internal": { + "type": "boolean" + }, + "phaseLabel": { + "type": 
"string" + }, + "requiresConfirmation": { + "type": "boolean" + }, + "title": { + "type": "string" + } }, - type: 'object', + "type": "object" }, - MothershipStreamV1Trace: { - additionalProperties: false, - properties: { - goTraceId: { - description: - 'OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity.', - type: 'string', + "MothershipStreamV1Trace": { + "additionalProperties": false, + "properties": { + "goTraceId": { + "description": "OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity.", + "type": "string" }, - requestId: { - type: 'string', - }, - spanId: { - type: 'string', + "requestId": { + "type": "string" }, + "spanId": { + "type": "string" + } }, - required: ['requestId'], - type: 'object', + "required": [ + "requestId" + ], + "type": "object" }, - MothershipStreamV1UsageData: { - additionalProperties: false, - properties: { - cache_creation_input_tokens: { - type: 'integer', - }, - cache_read_input_tokens: { - type: 'integer', + "MothershipStreamV1UsageData": { + "additionalProperties": false, + "properties": { + "cache_creation_input_tokens": { + "type": "integer" }, - input_tokens: { - type: 'integer', + "cache_read_input_tokens": { + "type": "integer" }, - model: { - type: 'string', + "input_tokens": { + "type": "integer" }, - output_tokens: { - type: 'integer', + "model": { + "type": "string" }, - total_tokens: { - type: 'integer', + "output_tokens": { + "type": "integer" }, + "total_tokens": { + "type": "integer" + } }, - type: 'object', - }, + "type": "object" + } }, - $id: 'mothership-stream-v1.schema.json', - $schema: 'https://json-schema.org/draft/2020-12/schema', - description: 'Shared execution-oriented mothership stream contract from Go to Sim.', - oneOf: [ + "$id": "mothership-stream-v1.schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Shared execution-oriented mothership 
stream contract from Go to Sim.", + "oneOf": [ { - $ref: '#/$defs/MothershipStreamV1SessionStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionChatEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionChatEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionTitleEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionTitleEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SessionTraceEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SessionTraceEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1TextEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1TextEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolCallEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolCallEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ToolResultEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ToolResultEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope', + "$ref": 
"#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1RunResumedEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1RunResumedEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CompactionStartEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1CompactionStartEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CompactionDoneEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1CompactionDoneEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1ErrorEventEnvelope', + "$ref": "#/$defs/MothershipStreamV1ErrorEventEnvelope" }, { - $ref: '#/$defs/MothershipStreamV1CompleteEventEnvelope', - }, + "$ref": "#/$defs/MothershipStreamV1CompleteEventEnvelope" + } ], - title: 'MothershipStreamV1EventEnvelope', + "title": "MothershipStreamV1EventEnvelope" } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts index ef7f2e065fb..95d86b3823d 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts @@ -24,534 +24,512 @@ export type MothershipStreamV1EventEnvelope = | MothershipStreamV1CompactionStartEventEnvelope | MothershipStreamV1CompactionDoneEventEnvelope | MothershipStreamV1ErrorEventEnvelope - | MothershipStreamV1CompleteEventEnvelope -export type MothershipStreamV1TextChannel = 'assistant' | 'thinking' -export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client' -export type MothershipStreamV1ToolMode = 'sync' | 'async' + | MothershipStreamV1CompleteEventEnvelope; +export type MothershipStreamV1TextChannel = "assistant" | "thinking"; +export type MothershipStreamV1ToolExecutor = "go" | "sim" | "client"; +export type MothershipStreamV1ToolMode = "sync" | "async"; export type MothershipStreamV1ToolStatus = - | 'generating' - | 'executing' - | 'success' - | 'error' - | 'cancelled' - | 'skipped' - | 'rejected' -export type MothershipStreamV1CompletionStatus = 'complete' | 
'error' | 'cancelled' + | "generating" + | "executing" + | "success" + | "error" + | "cancelled" + | "skipped" + | "rejected"; +export type MothershipStreamV1CompletionStatus = "complete" | "error" | "cancelled"; export interface MothershipStreamV1SessionStartEventEnvelope { - payload: MothershipStreamV1SessionStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionStartPayload { - data?: MothershipStreamV1SessionStartData - kind: 'start' + data?: MothershipStreamV1SessionStartData; + kind: "start"; } export interface MothershipStreamV1SessionStartData { - responseId?: string + responseId?: string; } export interface MothershipStreamV1StreamScope { - agentId?: string - lane: 'subagent' - parentToolCallId?: string + agentId?: string; + lane: "subagent"; + parentToolCallId?: string; } export interface MothershipStreamV1StreamRef { - chatId?: string - cursor?: string - streamId: string + chatId?: string; + cursor?: string; + streamId: string; } export interface MothershipStreamV1Trace { /** * OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity. 
*/ - goTraceId?: string - requestId: string - spanId?: string + goTraceId?: string; + requestId: string; + spanId?: string; } export interface MothershipStreamV1SessionChatEventEnvelope { - payload: MothershipStreamV1SessionChatPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionChatPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionChatPayload { - chatId: string - kind: 'chat' + chatId: string; + kind: "chat"; } export interface MothershipStreamV1SessionTitleEventEnvelope { - payload: MothershipStreamV1SessionTitlePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionTitlePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionTitlePayload { - kind: 'title' - title: string + kind: "title"; + title: string; } export interface MothershipStreamV1SessionTraceEventEnvelope { - payload: MothershipStreamV1SessionTracePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'session' - v: 1 + payload: MothershipStreamV1SessionTracePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "session"; + v: 1; } export interface MothershipStreamV1SessionTracePayload { - kind: 'trace' - requestId: string - spanId?: string + kind: "trace"; + 
requestId: string; + spanId?: string; } export interface MothershipStreamV1TextEventEnvelope { - payload: MothershipStreamV1TextPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'text' - v: 1 + payload: MothershipStreamV1TextPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "text"; + v: 1; } export interface MothershipStreamV1TextPayload { - channel: MothershipStreamV1TextChannel - text: string + channel: MothershipStreamV1TextChannel; + text: string; } export interface MothershipStreamV1ToolCallEventEnvelope { - payload: MothershipStreamV1ToolCallDescriptor - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolCallDescriptor; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolCallDescriptor { - arguments?: MothershipStreamV1AdditionalPropertiesMap - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - partial?: boolean - phase: 'call' - requiresConfirmation?: boolean - status?: MothershipStreamV1ToolStatus - toolCallId: string - toolName: string - ui?: MothershipStreamV1ToolUI + arguments?: MothershipStreamV1AdditionalPropertiesMap; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + partial?: boolean; + phase: "call"; + requiresConfirmation?: boolean; + status?: MothershipStreamV1ToolStatus; + toolCallId: string; + toolName: string; + ui?: MothershipStreamV1ToolUI; } export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown + [k: string]: unknown; } export interface 
MothershipStreamV1ToolUI { - clientExecutable?: boolean - hidden?: boolean - icon?: string - internal?: boolean - phaseLabel?: string - requiresConfirmation?: boolean - title?: string + clientExecutable?: boolean; + hidden?: boolean; + icon?: string; + internal?: boolean; + phaseLabel?: string; + requiresConfirmation?: boolean; + title?: string; } export interface MothershipStreamV1ToolArgsDeltaEventEnvelope { - payload: MothershipStreamV1ToolArgsDeltaPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolArgsDeltaPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolArgsDeltaPayload { - argumentsDelta: string - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - phase: 'args_delta' - toolCallId: string - toolName: string + argumentsDelta: string; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + phase: "args_delta"; + toolCallId: string; + toolName: string; } export interface MothershipStreamV1ToolResultEventEnvelope { - payload: MothershipStreamV1ToolResultPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'tool' - v: 1 + payload: MothershipStreamV1ToolResultPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "tool"; + v: 1; } export interface MothershipStreamV1ToolResultPayload { - error?: string - executor: MothershipStreamV1ToolExecutor - mode: MothershipStreamV1ToolMode - output?: unknown - phase: 'result' - status?: MothershipStreamV1ToolStatus - success: boolean - toolCallId: string - 
toolName: string + error?: string; + executor: MothershipStreamV1ToolExecutor; + mode: MothershipStreamV1ToolMode; + output?: unknown; + phase: "result"; + status?: MothershipStreamV1ToolStatus; + success: boolean; + toolCallId: string; + toolName: string; } export interface MothershipStreamV1SubagentSpanStartEventEnvelope { - payload: MothershipStreamV1SubagentSpanStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentSpanStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentSpanStartPayload { - agent?: string - data?: unknown - event: 'start' - kind: 'subagent' + agent?: string; + data?: unknown; + event: "start"; + kind: "subagent"; } export interface MothershipStreamV1SubagentSpanEndEventEnvelope { - payload: MothershipStreamV1SubagentSpanEndPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentSpanEndPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentSpanEndPayload { - agent?: string - data?: unknown - event: 'end' - kind: 'subagent' + agent?: string; + data?: unknown; + event: "end"; + kind: "subagent"; } export interface MothershipStreamV1StructuredResultSpanEventEnvelope { - payload: MothershipStreamV1StructuredResultSpanPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: 
MothershipStreamV1StructuredResultSpanPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1StructuredResultSpanPayload { - agent?: string - data?: unknown - kind: 'structured_result' + agent?: string; + data?: unknown; + kind: "structured_result"; } export interface MothershipStreamV1SubagentResultSpanEventEnvelope { - payload: MothershipStreamV1SubagentResultSpanPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'span' - v: 1 + payload: MothershipStreamV1SubagentResultSpanPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "span"; + v: 1; } export interface MothershipStreamV1SubagentResultSpanPayload { - agent?: string - data?: unknown - kind: 'subagent_result' + agent?: string; + data?: unknown; + kind: "subagent_result"; } export interface MothershipStreamV1ResourceUpsertEventEnvelope { - payload: MothershipStreamV1ResourceUpsertPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'resource' - v: 1 + payload: MothershipStreamV1ResourceUpsertPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "resource"; + v: 1; } export interface MothershipStreamV1ResourceUpsertPayload { - op: 'upsert' - resource: MothershipStreamV1ResourceDescriptor + op: "upsert"; + resource: MothershipStreamV1ResourceDescriptor; } export interface MothershipStreamV1ResourceDescriptor { - id: string - title?: string - type: string + id: string; + title?: string; + type: string; } export interface 
MothershipStreamV1ResourceRemoveEventEnvelope { - payload: MothershipStreamV1ResourceRemovePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'resource' - v: 1 + payload: MothershipStreamV1ResourceRemovePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "resource"; + v: 1; } export interface MothershipStreamV1ResourceRemovePayload { - op: 'remove' - resource: MothershipStreamV1ResourceDescriptor + op: "remove"; + resource: MothershipStreamV1ResourceDescriptor; } export interface MothershipStreamV1CheckpointPauseEventEnvelope { - payload: MothershipStreamV1CheckpointPausePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CheckpointPausePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CheckpointPausePayload { - checkpointId: string - executionId: string - frames?: MothershipStreamV1CheckpointPauseFrame[] - kind: 'checkpoint_pause' - pendingToolCallIds: string[] - runId: string + checkpointId: string; + executionId: string; + frames?: MothershipStreamV1CheckpointPauseFrame[]; + kind: "checkpoint_pause"; + pendingToolCallIds: string[]; + runId: string; } export interface MothershipStreamV1CheckpointPauseFrame { - parentToolCallId: string - parentToolName: string - pendingToolIds: string[] + parentToolCallId: string; + parentToolName: string; + pendingToolIds: string[]; } export interface MothershipStreamV1RunResumedEventEnvelope { - payload: MothershipStreamV1RunResumedPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: 
MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1RunResumedPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1RunResumedPayload { - kind: 'resumed' + kind: "resumed"; } export interface MothershipStreamV1CompactionStartEventEnvelope { - payload: MothershipStreamV1CompactionStartPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CompactionStartPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CompactionStartPayload { - kind: 'compaction_start' + kind: "compaction_start"; } export interface MothershipStreamV1CompactionDoneEventEnvelope { - payload: MothershipStreamV1CompactionDonePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'run' - v: 1 + payload: MothershipStreamV1CompactionDonePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "run"; + v: 1; } export interface MothershipStreamV1CompactionDonePayload { - data?: MothershipStreamV1CompactionDoneData - kind: 'compaction_done' + data?: MothershipStreamV1CompactionDoneData; + kind: "compaction_done"; } export interface MothershipStreamV1CompactionDoneData { - summary_chars: number + summary_chars: number; } export interface MothershipStreamV1ErrorEventEnvelope { - payload: MothershipStreamV1ErrorPayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: 
MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'error' - v: 1 + payload: MothershipStreamV1ErrorPayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "error"; + v: 1; } export interface MothershipStreamV1ErrorPayload { - code?: string - data?: unknown - displayMessage?: string - error?: string - message: string - provider?: string + code?: string; + data?: unknown; + displayMessage?: string; + error?: string; + message: string; + provider?: string; } export interface MothershipStreamV1CompleteEventEnvelope { - payload: MothershipStreamV1CompletePayload - scope?: MothershipStreamV1StreamScope - seq: number - stream: MothershipStreamV1StreamRef - trace?: MothershipStreamV1Trace - ts: string - type: 'complete' - v: 1 + payload: MothershipStreamV1CompletePayload; + scope?: MothershipStreamV1StreamScope; + seq: number; + stream: MothershipStreamV1StreamRef; + trace?: MothershipStreamV1Trace; + ts: string; + type: "complete"; + v: 1; } export interface MothershipStreamV1CompletePayload { - cost?: MothershipStreamV1CostData - reason?: string - response?: unknown - status: MothershipStreamV1CompletionStatus - usage?: MothershipStreamV1UsageData + cost?: MothershipStreamV1CostData; + reason?: string; + response?: unknown; + status: MothershipStreamV1CompletionStatus; + usage?: MothershipStreamV1UsageData; } export interface MothershipStreamV1CostData { - input?: number - output?: number - total?: number + input?: number; + output?: number; + total?: number; } export interface MothershipStreamV1UsageData { - cache_creation_input_tokens?: number - cache_read_input_tokens?: number - input_tokens?: number - model?: string - output_tokens?: number - total_tokens?: number + cache_creation_input_tokens?: number; + cache_read_input_tokens?: number; + input_tokens?: number; + model?: string; + output_tokens?: number; + total_tokens?: number; 
} -export type MothershipStreamV1AsyncToolRecordStatus = - | 'pending' - | 'running' - | 'completed' - | 'failed' - | 'cancelled' - | 'delivered' +export type MothershipStreamV1AsyncToolRecordStatus = "pending" | "running" | "completed" | "failed" | "cancelled" | "delivered" export const MothershipStreamV1AsyncToolRecordStatus = { - pending: 'pending', - running: 'running', - completed: 'completed', - failed: 'failed', - cancelled: 'cancelled', - delivered: 'delivered', -} as const + "pending": "pending", + "running": "running", + "completed": "completed", + "failed": "failed", + "cancelled": "cancelled", + "delivered": "delivered", +} as const; export const MothershipStreamV1CompletionStatus = { - complete: 'complete', - error: 'error', - cancelled: 'cancelled', -} as const + "complete": "complete", + "error": "error", + "cancelled": "cancelled", +} as const; -export type MothershipStreamV1EventType = - | 'session' - | 'text' - | 'tool' - | 'span' - | 'resource' - | 'run' - | 'error' - | 'complete' +export type MothershipStreamV1EventType = "session" | "text" | "tool" | "span" | "resource" | "run" | "error" | "complete" export const MothershipStreamV1EventType = { - session: 'session', - text: 'text', - tool: 'tool', - span: 'span', - resource: 'resource', - run: 'run', - error: 'error', - complete: 'complete', -} as const + "session": "session", + "text": "text", + "tool": "tool", + "span": "span", + "resource": "resource", + "run": "run", + "error": "error", + "complete": "complete", +} as const; -export type MothershipStreamV1ResourceOp = 'upsert' | 'remove' +export type MothershipStreamV1ResourceOp = "upsert" | "remove" export const MothershipStreamV1ResourceOp = { - upsert: 'upsert', - remove: 'remove', -} as const + "upsert": "upsert", + "remove": "remove", +} as const; -export type MothershipStreamV1RunKind = - | 'checkpoint_pause' - | 'resumed' - | 'compaction_start' - | 'compaction_done' +export type MothershipStreamV1RunKind = "checkpoint_pause" | 
"resumed" | "compaction_start" | "compaction_done" export const MothershipStreamV1RunKind = { - checkpoint_pause: 'checkpoint_pause', - resumed: 'resumed', - compaction_start: 'compaction_start', - compaction_done: 'compaction_done', -} as const + "checkpoint_pause": "checkpoint_pause", + "resumed": "resumed", + "compaction_start": "compaction_start", + "compaction_done": "compaction_done", +} as const; -export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start' +export type MothershipStreamV1SessionKind = "trace" | "chat" | "title" | "start" export const MothershipStreamV1SessionKind = { - trace: 'trace', - chat: 'chat', - title: 'title', - start: 'start', -} as const + "trace": "trace", + "chat": "chat", + "title": "title", + "start": "start", +} as const; -export type MothershipStreamV1SpanKind = 'subagent' +export type MothershipStreamV1SpanKind = "subagent" export const MothershipStreamV1SpanKind = { - subagent: 'subagent', -} as const + "subagent": "subagent", +} as const; -export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end' +export type MothershipStreamV1SpanLifecycleEvent = "start" | "end" export const MothershipStreamV1SpanLifecycleEvent = { - start: 'start', - end: 'end', -} as const + "start": "start", + "end": "end", +} as const; -export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result' +export type MothershipStreamV1SpanPayloadKind = "subagent" | "structured_result" | "subagent_result" export const MothershipStreamV1SpanPayloadKind = { - subagent: 'subagent', - structured_result: 'structured_result', - subagent_result: 'subagent_result', -} as const + "subagent": "subagent", + "structured_result": "structured_result", + "subagent_result": "subagent_result", +} as const; export const MothershipStreamV1TextChannel = { - assistant: 'assistant', - thinking: 'thinking', -} as const + "assistant": "assistant", + "thinking": "thinking", +} as const; export const 
MothershipStreamV1ToolExecutor = { - go: 'go', - sim: 'sim', - client: 'client', -} as const + "go": "go", + "sim": "sim", + "client": "client", +} as const; export const MothershipStreamV1ToolMode = { - sync: 'sync', - async: 'async', -} as const + "sync": "sync", + "async": "async", +} as const; -export type MothershipStreamV1ToolOutcome = - | 'success' - | 'error' - | 'cancelled' - | 'skipped' - | 'rejected' +export type MothershipStreamV1ToolOutcome = "success" | "error" | "cancelled" | "skipped" | "rejected" export const MothershipStreamV1ToolOutcome = { - success: 'success', - error: 'error', - cancelled: 'cancelled', - skipped: 'skipped', - rejected: 'rejected', -} as const + "success": "success", + "error": "error", + "cancelled": "cancelled", + "skipped": "skipped", + "rejected": "rejected", +} as const; -export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result' +export type MothershipStreamV1ToolPhase = "call" | "args_delta" | "result" export const MothershipStreamV1ToolPhase = { - call: 'call', - args_delta: 'args_delta', - result: 'result', -} as const + "call": "call", + "args_delta": "args_delta", + "result": "result", +} as const; export const MothershipStreamV1ToolStatus = { - generating: 'generating', - executing: 'executing', - success: 'success', - error: 'error', - cancelled: 'cancelled', - skipped: 'skipped', - rejected: 'rejected', -} as const + "generating": "generating", + "executing": "executing", + "success": "success", + "error": "error", + "cancelled": "cancelled", + "skipped": "skipped", + "rejected": "rejected", +} as const; + diff --git a/apps/sim/lib/copilot/generated/request-trace-v1.ts b/apps/sim/lib/copilot/generated/request-trace-v1.ts index f8d2bd06e01..d1cd137dfbc 100644 --- a/apps/sim/lib/copilot/generated/request-trace-v1.ts +++ b/apps/sim/lib/copilot/generated/request-trace-v1.ts @@ -5,129 +5,130 @@ * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` 
"RequestTraceV1Outcome". */ -export type RequestTraceV1Outcome = 'success' | 'error' | 'cancelled' +export type RequestTraceV1Outcome = "success" | "error" | "cancelled"; /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SpanSource". */ -export type RequestTraceV1SpanSource = 'sim' | 'go' +export type RequestTraceV1SpanSource = "sim" | "go"; /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SpanStatus". */ -export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled' | 'pending' +export type RequestTraceV1SpanStatus = "ok" | "error" | "cancelled" | "pending"; /** * Trace report sent from Sim to Go after a request completes. */ export interface RequestTraceV1SimReport { - chatId?: string - cost?: RequestTraceV1CostSummary - durationMs: number - endMs: number - executionId?: string - goTraceId?: string - outcome: RequestTraceV1Outcome - runId?: string - simRequestId: string - spans: RequestTraceV1Span[] - startMs: number - streamId?: string - usage?: RequestTraceV1UsageSummary + chatId?: string; + cost?: RequestTraceV1CostSummary; + durationMs: number; + endMs: number; + executionId?: string; + goTraceId?: string; + outcome: RequestTraceV1Outcome; + runId?: string; + simRequestId: string; + spans: RequestTraceV1Span[]; + startMs: number; + streamId?: string; + usage?: RequestTraceV1UsageSummary; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1CostSummary". */ export interface RequestTraceV1CostSummary { - billedTotalCost?: number - rawTotalCost?: number + billedTotalCost?: number; + rawTotalCost?: number; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1Span". 
*/ export interface RequestTraceV1Span { - attributes?: MothershipStreamV1AdditionalPropertiesMap - durationMs: number - endMs: number - kind?: string - name: string - parentName?: string - source?: RequestTraceV1SpanSource - startMs: number - status: RequestTraceV1SpanStatus + attributes?: MothershipStreamV1AdditionalPropertiesMap; + durationMs: number; + endMs: number; + kind?: string; + name: string; + parentName?: string; + source?: RequestTraceV1SpanSource; + startMs: number; + status: RequestTraceV1SpanStatus; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "MothershipStreamV1AdditionalPropertiesMap". */ export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown + [k: string]: unknown; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1UsageSummary". */ export interface RequestTraceV1UsageSummary { - cacheReadTokens?: number - cacheWriteTokens?: number - inputTokens?: number - outputTokens?: number + cacheReadTokens?: number; + cacheWriteTokens?: number; + inputTokens?: number; + outputTokens?: number; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1MergedTrace". 
*/ export interface RequestTraceV1MergedTrace { - chatId?: string - cost?: RequestTraceV1CostSummary - durationMs: number - endMs: number - goTraceId: string - outcome: RequestTraceV1Outcome - serviceCharges?: MothershipStreamV1AdditionalPropertiesMap - simRequestId?: string - spans: RequestTraceV1Span[] - startMs: number - streamId?: string - usage?: RequestTraceV1UsageSummary - userId?: string + chatId?: string; + cost?: RequestTraceV1CostSummary; + durationMs: number; + endMs: number; + goTraceId: string; + outcome: RequestTraceV1Outcome; + serviceCharges?: MothershipStreamV1AdditionalPropertiesMap; + simRequestId?: string; + spans: RequestTraceV1Span[]; + startMs: number; + streamId?: string; + usage?: RequestTraceV1UsageSummary; + userId?: string; } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SimReport". */ export interface RequestTraceV1SimReport1 { - chatId?: string - cost?: RequestTraceV1CostSummary - durationMs: number - endMs: number - executionId?: string - goTraceId?: string - outcome: RequestTraceV1Outcome - runId?: string - simRequestId: string - spans: RequestTraceV1Span[] - startMs: number - streamId?: string - usage?: RequestTraceV1UsageSummary + chatId?: string; + cost?: RequestTraceV1CostSummary; + durationMs: number; + endMs: number; + executionId?: string; + goTraceId?: string; + outcome: RequestTraceV1Outcome; + runId?: string; + simRequestId: string; + spans: RequestTraceV1Span[]; + startMs: number; + streamId?: string; + usage?: RequestTraceV1UsageSummary; } export const RequestTraceV1Outcome = { - success: 'success', - error: 'error', - cancelled: 'cancelled', -} as const + "success": "success", + "error": "error", + "cancelled": "cancelled", +} as const; export const RequestTraceV1SpanSource = { - sim: 'sim', - go: 'go', -} as const + "sim": "sim", + "go": "go", +} as const; export const RequestTraceV1SpanStatus = { - ok: 'ok', - error: 'error', - cancelled: 
'cancelled', - pending: 'pending', -} as const + "ok": "ok", + "error": "error", + "cancelled": "cancelled", + "pending": "pending", +} as const; + diff --git a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts index 2af7b1d660e..cfc93ef496d 100644 --- a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts @@ -3,3107 +3,838 @@ // export interface ToolCatalogEntry { - clientExecutable?: boolean - hidden?: boolean - id: - | 'agent' - | 'auth' - | 'check_deployment_status' - | 'complete_job' - | 'context_write' - | 'crawl_website' - | 'create_file' - | 'create_folder' - | 'create_job' - | 'create_workflow' - | 'create_workspace_mcp_server' - | 'debug' - | 'delete_file' - | 'delete_folder' - | 'delete_workflow' - | 'delete_workspace_mcp_server' - | 'deploy' - | 'deploy_api' - | 'deploy_chat' - | 'deploy_mcp' - | 'download_to_workspace_file' - | 'edit_content' - | 'edit_workflow' - | 'file' - | 'function_execute' - | 'generate_api_key' - | 'generate_image' - | 'generate_visualization' - | 'get_block_outputs' - | 'get_block_upstream_references' - | 'get_deployed_workflow_state' - | 'get_deployment_version' - | 'get_execution_summary' - | 'get_job_logs' - | 'get_page_contents' - | 'get_platform_actions' - | 'get_workflow_data' - | 'get_workflow_logs' - | 'glob' - | 'grep' - | 'job' - | 'knowledge' - | 'knowledge_base' - | 'list_folders' - | 'list_user_workspaces' - | 'list_workspace_mcp_servers' - | 'manage_credential' - | 'manage_custom_tool' - | 'manage_job' - | 'manage_mcp_tool' - | 'manage_skill' - | 'materialize_file' - | 'move_folder' - | 'move_workflow' - | 'oauth_get_auth_link' - | 'oauth_request_access' - | 'open_resource' - | 'read' - | 'redeploy' - | 'rename_file' - | 'rename_workflow' - | 'research' - | 'respond' - | 'restore_resource' - | 'revert_to_version' - | 'run' - | 'run_block' - | 'run_from_block' - | 'run_workflow' - | 'run_workflow_until_block' - 
| 'scrape_page' - | 'search_documentation' - | 'search_library_docs' - | 'search_online' - | 'search_patterns' - | 'set_block_enabled' - | 'set_environment_variables' - | 'set_global_workflow_variables' - | 'superagent' - | 'table' - | 'tool_search_tool_regex' - | 'update_job_history' - | 'update_workspace_mcp_server' - | 'user_memory' - | 'user_table' - | 'workflow' - | 'workspace_file' - internal?: boolean - mode: 'async' | 'sync' - name: - | 'agent' - | 'auth' - | 'check_deployment_status' - | 'complete_job' - | 'context_write' - | 'crawl_website' - | 'create_file' - | 'create_folder' - | 'create_job' - | 'create_workflow' - | 'create_workspace_mcp_server' - | 'debug' - | 'delete_file' - | 'delete_folder' - | 'delete_workflow' - | 'delete_workspace_mcp_server' - | 'deploy' - | 'deploy_api' - | 'deploy_chat' - | 'deploy_mcp' - | 'download_to_workspace_file' - | 'edit_content' - | 'edit_workflow' - | 'file' - | 'function_execute' - | 'generate_api_key' - | 'generate_image' - | 'generate_visualization' - | 'get_block_outputs' - | 'get_block_upstream_references' - | 'get_deployed_workflow_state' - | 'get_deployment_version' - | 'get_execution_summary' - | 'get_job_logs' - | 'get_page_contents' - | 'get_platform_actions' - | 'get_workflow_data' - | 'get_workflow_logs' - | 'glob' - | 'grep' - | 'job' - | 'knowledge' - | 'knowledge_base' - | 'list_folders' - | 'list_user_workspaces' - | 'list_workspace_mcp_servers' - | 'manage_credential' - | 'manage_custom_tool' - | 'manage_job' - | 'manage_mcp_tool' - | 'manage_skill' - | 'materialize_file' - | 'move_folder' - | 'move_workflow' - | 'oauth_get_auth_link' - | 'oauth_request_access' - | 'open_resource' - | 'read' - | 'redeploy' - | 'rename_file' - | 'rename_workflow' - | 'research' - | 'respond' - | 'restore_resource' - | 'revert_to_version' - | 'run' - | 'run_block' - | 'run_from_block' - | 'run_workflow' - | 'run_workflow_until_block' - | 'scrape_page' - | 'search_documentation' - | 'search_library_docs' - | 
'search_online' - | 'search_patterns' - | 'set_block_enabled' - | 'set_environment_variables' - | 'set_global_workflow_variables' - | 'superagent' - | 'table' - | 'tool_search_tool_regex' - | 'update_job_history' - | 'update_workspace_mcp_server' - | 'user_memory' - | 'user_table' - | 'workflow' - | 'workspace_file' - parameters: unknown - requiredPermission?: 'admin' | 'write' - requiresConfirmation?: boolean - resultSchema?: unknown - route: 'client' | 'go' | 'sim' | 'subagent' - subagentId?: - | 'agent' - | 'auth' - | 'debug' - | 'deploy' - | 'file' - | 'job' - | 'knowledge' - | 'research' - | 'run' - | 'superagent' - | 'table' - | 'workflow' + clientExecutable?: boolean; + hidden?: boolean; + id: "agent" | "auth" | "check_deployment_status" | "complete_job" | "context_write" | "crawl_website" | "create_file" | "create_folder" | "create_job" | "create_workflow" | "create_workspace_mcp_server" | "debug" | "delete_file" | "delete_folder" | "delete_workflow" | "delete_workspace_mcp_server" | "deploy" | "deploy_api" | "deploy_chat" | "deploy_mcp" | "download_to_workspace_file" | "edit_content" | "edit_workflow" | "file" | "function_execute" | "generate_api_key" | "generate_image" | "generate_visualization" | "get_block_outputs" | "get_block_upstream_references" | "get_deployed_workflow_state" | "get_deployment_version" | "get_execution_summary" | "get_job_logs" | "get_page_contents" | "get_platform_actions" | "get_workflow_data" | "get_workflow_logs" | "glob" | "grep" | "job" | "knowledge" | "knowledge_base" | "list_folders" | "list_user_workspaces" | "list_workspace_mcp_servers" | "manage_credential" | "manage_custom_tool" | "manage_job" | "manage_mcp_tool" | "manage_skill" | "materialize_file" | "move_folder" | "move_workflow" | "oauth_get_auth_link" | "oauth_request_access" | "open_resource" | "read" | "redeploy" | "rename_file" | "rename_workflow" | "research" | "respond" | "restore_resource" | "revert_to_version" | "run" | "run_block" | "run_from_block" | 
"run_workflow" | "run_workflow_until_block" | "scrape_page" | "search_documentation" | "search_library_docs" | "search_online" | "search_patterns" | "set_block_enabled" | "set_environment_variables" | "set_global_workflow_variables" | "superagent" | "table" | "tool_search_tool_regex" | "update_job_history" | "update_workspace_mcp_server" | "user_memory" | "user_table" | "workflow" | "workspace_file"; + internal?: boolean; + mode: "async" | "sync"; + name: "agent" | "auth" | "check_deployment_status" | "complete_job" | "context_write" | "crawl_website" | "create_file" | "create_folder" | "create_job" | "create_workflow" | "create_workspace_mcp_server" | "debug" | "delete_file" | "delete_folder" | "delete_workflow" | "delete_workspace_mcp_server" | "deploy" | "deploy_api" | "deploy_chat" | "deploy_mcp" | "download_to_workspace_file" | "edit_content" | "edit_workflow" | "file" | "function_execute" | "generate_api_key" | "generate_image" | "generate_visualization" | "get_block_outputs" | "get_block_upstream_references" | "get_deployed_workflow_state" | "get_deployment_version" | "get_execution_summary" | "get_job_logs" | "get_page_contents" | "get_platform_actions" | "get_workflow_data" | "get_workflow_logs" | "glob" | "grep" | "job" | "knowledge" | "knowledge_base" | "list_folders" | "list_user_workspaces" | "list_workspace_mcp_servers" | "manage_credential" | "manage_custom_tool" | "manage_job" | "manage_mcp_tool" | "manage_skill" | "materialize_file" | "move_folder" | "move_workflow" | "oauth_get_auth_link" | "oauth_request_access" | "open_resource" | "read" | "redeploy" | "rename_file" | "rename_workflow" | "research" | "respond" | "restore_resource" | "revert_to_version" | "run" | "run_block" | "run_from_block" | "run_workflow" | "run_workflow_until_block" | "scrape_page" | "search_documentation" | "search_library_docs" | "search_online" | "search_patterns" | "set_block_enabled" | "set_environment_variables" | "set_global_workflow_variables" | "superagent" | 
"table" | "tool_search_tool_regex" | "update_job_history" | "update_workspace_mcp_server" | "user_memory" | "user_table" | "workflow" | "workspace_file"; + parameters: unknown; + requiredPermission?: "admin" | "write"; + requiresConfirmation?: boolean; + resultSchema?: unknown; + route: "client" | "go" | "sim" | "subagent"; + subagentId?: "agent" | "auth" | "debug" | "deploy" | "file" | "job" | "knowledge" | "research" | "run" | "superagent" | "table" | "workflow"; } export const Agent: ToolCatalogEntry = { - id: 'agent', - name: 'agent', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - request: { description: 'What tool/skill/MCP action is needed.', type: 'string' }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'agent', + id: "agent", + name: "agent", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"What tool/skill/MCP action is needed.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "agent", internal: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const Auth: ToolCatalogEntry = { - id: 'auth', - name: 'auth', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - request: { description: 'What authentication/credential action is needed.', type: 'string' }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'auth', + id: "auth", + name: "auth", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"What authentication/credential action is needed.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "auth", internal: true, -} +}; export const CheckDeploymentStatus: ToolCatalogEntry = { - id: 'check_deployment_status', - name: 'check_deployment_status', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: 'Workflow ID to check (defaults to current 
workflow)', - }, - }, - }, -} + id: "check_deployment_status", + name: "check_deployment_status", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Workflow ID to check (defaults to current workflow)"}}}, +}; export const CompleteJob: ToolCatalogEntry = { - id: 'complete_job', - name: 'complete_job', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - jobId: { type: 'string', description: 'The ID of the job to mark as completed.' }, - }, - required: ['jobId'], - }, -} + id: "complete_job", + name: "complete_job", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"jobId":{"type":"string","description":"The ID of the job to mark as completed."}},"required":["jobId"]}, +}; export const ContextWrite: ToolCatalogEntry = { - id: 'context_write', - name: 'context_write', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: 'Full content to write to the file (replaces existing content)', - }, - file_path: { type: 'string', description: "Path of the file to write (e.g. 'SESSION.md')" }, - }, - required: ['file_path', 'content'], - }, -} + id: "context_write", + name: "context_write", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"content":{"type":"string","description":"Full content to write to the file (replaces existing content)"},"file_path":{"type":"string","description":"Path of the file to write (e.g. 
'SESSION.md')"}},"required":["file_path","content"]}, +}; export const CrawlWebsite: ToolCatalogEntry = { - id: 'crawl_website', - name: 'crawl_website', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - exclude_paths: { - type: 'array', - description: 'Skip URLs matching these patterns', - items: { type: 'string' }, - }, - include_paths: { - type: 'array', - description: 'Only crawl URLs matching these patterns', - items: { type: 'string' }, - }, - limit: { type: 'number', description: 'Maximum pages to crawl (default 10, max 50)' }, - max_depth: { type: 'number', description: 'How deep to follow links (default 2)' }, - url: { type: 'string', description: 'Starting URL to crawl from' }, - }, - required: ['url'], - }, -} + id: "crawl_website", + name: "crawl_website", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"exclude_paths":{"type":"array","description":"Skip URLs matching these patterns","items":{"type":"string"}},"include_paths":{"type":"array","description":"Only crawl URLs matching these patterns","items":{"type":"string"}},"limit":{"type":"number","description":"Maximum pages to crawl (default 10, max 50)"},"max_depth":{"type":"number","description":"How deep to follow links (default 2)"},"url":{"type":"string","description":"Starting URL to crawl from"}},"required":["url"]}, +}; export const CreateFile: ToolCatalogEntry = { - id: 'create_file', - name: 'create_file', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - contentType: { - type: 'string', - description: - 'Optional MIME type override. Usually omit and let the system infer from the file extension.', - }, - fileName: { - type: 'string', - description: - 'Plain workspace filename including extension, e.g. "main.py" or "report.md". 
Must not contain slashes.', - }, - }, - required: ['fileName'], - }, - resultSchema: { - type: 'object', - properties: { - data: { type: 'object', description: 'Contains id (the fileId) and name.' }, - message: { type: 'string', description: 'Human-readable outcome.' }, - success: { type: 'boolean', description: 'Whether the file was created.' }, - }, - required: ['success', 'message'], - }, - requiredPermission: 'write', -} + id: "create_file", + name: "create_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"contentType":{"type":"string","description":"Optional MIME type override. Usually omit and let the system infer from the file extension."},"fileName":{"type":"string","description":"Plain workspace filename including extension, e.g. \"main.py\" or \"report.md\". Must not contain slashes."}},"required":["fileName"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Contains id (the fileId) and name."},"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the file was created."}},"required":["success","message"]}, + requiredPermission: "write", +}; export const CreateFolder: ToolCatalogEntry = { - id: 'create_folder', - name: 'create_folder', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - name: { type: 'string', description: 'Folder name.' }, - parentId: { type: 'string', description: 'Optional parent folder ID.' }, - workspaceId: { type: 'string', description: 'Optional workspace ID.' 
}, - }, - required: ['name'], - }, - requiredPermission: 'write', -} + id: "create_folder", + name: "create_folder", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"name":{"type":"string","description":"Folder name."},"parentId":{"type":"string","description":"Optional parent folder ID."},"workspaceId":{"type":"string","description":"Optional workspace ID."}},"required":["name"]}, + requiredPermission: "write", +}; export const CreateJob: ToolCatalogEntry = { - id: 'create_job', - name: 'create_job', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - cron: { - type: 'string', - description: - "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs.", - }, - lifecycle: { - type: 'string', - description: - "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", - enum: ['persistent', 'until_complete'], - }, - maxRuns: { - type: 'integer', - description: - 'Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling.', - }, - prompt: { - type: 'string', - description: - 'The prompt to execute when the job fires. This is sent to the Mothership as a user message.', - }, - successCondition: { - type: 'string', - description: - "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email').", - }, - time: { - type: 'string', - description: - "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter.", - }, - timezone: { - type: 'string', - description: - "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). 
Defaults to UTC.", - }, - title: { - type: 'string', - description: - "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name.", - }, - }, - required: ['title', 'prompt'], - }, -} + id: "create_job", + name: "create_job", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"cron":{"type":"string","description":"Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs."},"lifecycle":{"type":"string","description":"'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.","enum":["persistent","until_complete"]},"maxRuns":{"type":"integer","description":"Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling."},"prompt":{"type":"string","description":"The prompt to execute when the job fires. This is sent to the Mothership as a user message."},"successCondition":{"type":"string","description":"What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email')."},"time":{"type":"string","description":"ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter."},"timezone":{"type":"string","description":"IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC."},"title":{"type":"string","description":"A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). 
Used as the display name."}},"required":["title","prompt"]}, +}; export const CreateWorkflow: ToolCatalogEntry = { - id: 'create_workflow', - name: 'create_workflow', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - description: { type: 'string', description: 'Optional workflow description.' }, - folderId: { type: 'string', description: 'Optional folder ID.' }, - name: { type: 'string', description: 'Workflow name.' }, - workspaceId: { type: 'string', description: 'Optional workspace ID.' }, - }, - required: ['name'], - }, - requiredPermission: 'write', -} + id: "create_workflow", + name: "create_workflow", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"description":{"type":"string","description":"Optional workflow description."},"folderId":{"type":"string","description":"Optional folder ID."},"name":{"type":"string","description":"Workflow name."},"workspaceId":{"type":"string","description":"Optional workspace ID."}},"required":["name"]}, + requiredPermission: "write", +}; export const CreateWorkspaceMcpServer: ToolCatalogEntry = { - id: 'create_workspace_mcp_server', - name: 'create_workspace_mcp_server', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - description: { type: 'string', description: 'Optional description for the server' }, - name: { type: 'string', description: 'Required: server name' }, - workspaceId: { type: 'string', description: 'Workspace ID (defaults to current workspace)' }, - }, - required: ['name'], - }, + id: "create_workspace_mcp_server", + name: "create_workspace_mcp_server", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"description":{"type":"string","description":"Optional description for the server"},"name":{"type":"string","description":"Required: server name"},"workspaceId":{"type":"string","description":"Workspace ID (defaults to current workspace)"}},"required":["name"]}, requiresConfirmation: 
true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const Debug: ToolCatalogEntry = { - id: 'debug', - name: 'debug', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - context: { - description: - 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', - type: 'string', - }, - request: { - description: - 'What to debug. Include error messages, block IDs, and any context about the failure.', - type: 'string', - }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'debug', + id: "debug", + name: "debug", + route: "subagent", + mode: "async", + parameters: {"properties":{"context":{"description":"Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.","type":"string"},"request":{"description":"What to debug. Include error messages, block IDs, and any context about the failure.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "debug", internal: true, -} +}; export const DeleteFile: ToolCatalogEntry = { - id: 'delete_file', - name: 'delete_file', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - fileIds: { - type: 'array', - description: 'Canonical workspace file IDs of the files to delete.', - items: { type: 'string' }, - }, - }, - required: ['fileIds'], - }, - resultSchema: { - type: 'object', - properties: { - message: { type: 'string', description: 'Human-readable outcome.' }, - success: { type: 'boolean', description: 'Whether the delete succeeded.' 
}, - }, - required: ['success', 'message'], - }, - requiredPermission: 'write', -} + id: "delete_file", + name: "delete_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"fileIds":{"type":"array","description":"Canonical workspace file IDs of the files to delete.","items":{"type":"string"}}},"required":["fileIds"]}, + resultSchema: {"type":"object","properties":{"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the delete succeeded."}},"required":["success","message"]}, + requiredPermission: "write", +}; export const DeleteFolder: ToolCatalogEntry = { - id: 'delete_folder', - name: 'delete_folder', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - folderIds: { - type: 'array', - description: 'The folder IDs to delete.', - items: { type: 'string' }, - }, - }, - required: ['folderIds'], - }, + id: "delete_folder", + name: "delete_folder", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"folderIds":{"type":"array","description":"The folder IDs to delete.","items":{"type":"string"}}},"required":["folderIds"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const DeleteWorkflow: ToolCatalogEntry = { - id: 'delete_workflow', - name: 'delete_workflow', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workflowIds: { - type: 'array', - description: 'The workflow IDs to delete.', - items: { type: 'string' }, - }, - }, - required: ['workflowIds'], - }, + id: "delete_workflow", + name: "delete_workflow", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workflowIds":{"type":"array","description":"The workflow IDs to delete.","items":{"type":"string"}}},"required":["workflowIds"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const 
DeleteWorkspaceMcpServer: ToolCatalogEntry = { - id: 'delete_workspace_mcp_server', - name: 'delete_workspace_mcp_server', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - serverId: { type: 'string', description: 'Required: the MCP server ID to delete' }, - }, - required: ['serverId'], - }, + id: "delete_workspace_mcp_server", + name: "delete_workspace_mcp_server", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"serverId":{"type":"string","description":"Required: the MCP server ID to delete"}},"required":["serverId"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const Deploy: ToolCatalogEntry = { - id: 'deploy', - name: 'deploy', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - request: { - description: - 'Detailed deployment instructions. Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).', - type: 'string', - }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'deploy', + id: "deploy", + name: "deploy", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"Detailed deployment instructions. 
Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "deploy", internal: true, -} +}; export const DeployApi: ToolCatalogEntry = { - id: 'deploy_api', - name: 'deploy_api', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Whether to deploy or undeploy the API endpoint', - enum: ['deploy', 'undeploy'], - default: 'deploy', - }, - workflowId: { - type: 'string', - description: 'Workflow ID to deploy (required in workspace context)', - }, - }, - }, - resultSchema: { - type: 'object', - properties: { - apiEndpoint: { type: 'string', description: 'Canonical workflow execution endpoint.' }, - baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp when the workflow is deployed.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', - }, - deploymentStatus: { - type: 'object', - description: 'Structured per-surface deployment status keyed by surface name, such as api.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the workflow API is currently deployed after this tool call.', - }, - version: { - type: 'number', - description: 'Deployment version for the current API deployment.', - }, - workflowId: { type: 'string', description: 'Workflow ID that was deployed or undeployed.' }, - }, - required: [ - 'workflowId', - 'isDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, + id: "deploy_api", + name: "deploy_api", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"action":{"type":"string","description":"Whether to deploy or undeploy the API endpoint","enum":["deploy","undeploy"],"default":"deploy"},"workflowId":{"type":"string","description":"Workflow ID to deploy (required in workspace context)"}}}, + resultSchema: {"type":"object","properties":{"apiEndpoint":{"type":"string","description":"Canonical workflow execution endpoint."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deployedAt":{"type":"string","description":"Deployment timestamp when the workflow is deployed."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, such as api."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling."},"isDeployed":{"type":"boolean","description":"Whether the workflow API is currently deployed after this tool call."},"version":{"type":"number","description":"Deployment version for the current API deployment."},"workflowId":{"type":"string","description":"Workflow ID that was deployed or undeployed."}},"required":["workflowId","isDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const DeployChat: ToolCatalogEntry = { - id: 'deploy_chat', - name: 'deploy_chat', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Whether to deploy or undeploy the chat interface', - enum: ['deploy', 'undeploy'], - default: 'deploy', - }, - allowedEmails: { - type: 'array', - description: 'List of allowed emails/domains for email or SSO auth', - items: { type: 'string' }, - }, - authType: { - type: 'string', - description: 'Authentication type: public, password, email, or sso', - enum: ['public', 'password', 'email', 'sso'], - default: 'public', - }, - description: { type: 'string', description: 'Optional description for the chat' }, - identifier: { - type: 'string', - description: 'URL slug for the chat (lowercase letters, numbers, hyphens only)', - }, - outputConfigs: { - type: 'array', - description: 'Output configurations specifying which block outputs to display in chat', - items: { - type: 'object', - properties: { - blockId: { type: 'string', description: 'The block UUID' }, - path: { - type: 'string', - description: "The output path (e.g. 
'response', 'response.content')", - }, - }, - required: ['blockId', 'path'], - }, - }, - password: { type: 'string', description: 'Password for password-protected chats' }, - title: { type: 'string', description: 'Display title for the chat interface' }, - welcomeMessage: { type: 'string', description: 'Welcome message shown to users' }, - workflowId: { - type: 'string', - description: 'Workflow ID to deploy (required in workspace context)', - }, - }, - }, - resultSchema: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Action performed by the tool, such as "deploy" or "undeploy".', - }, - apiEndpoint: { - type: 'string', - description: 'Paired workflow execution endpoint used by the chat deployment.', - }, - baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, - chatUrl: { - type: 'string', - description: 'Shareable chat URL when the chat surface is deployed.', - }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp for the underlying workflow deployment.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration.', - }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, including api and chat.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_chat this is always "chat".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. Includes chat access details and API curl examples.', - }, - identifier: { type: 'string', description: 'Chat identifier or slug.' 
}, - isChatDeployed: { - type: 'boolean', - description: 'Whether the chat surface is deployed after this tool call.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the paired API surface remains deployed after this tool call.', - }, - success: { - type: 'boolean', - description: 'Whether the deploy_chat action completed successfully.', - }, - version: { - type: 'number', - description: 'Deployment version for the underlying workflow deployment.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID associated with the chat deployment.', - }, - }, - required: [ - 'workflowId', - 'success', - 'action', - 'isDeployed', - 'isChatDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, + id: "deploy_chat", + name: "deploy_chat", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"action":{"type":"string","description":"Whether to deploy or undeploy the chat interface","enum":["deploy","undeploy"],"default":"deploy"},"allowedEmails":{"type":"array","description":"List of allowed emails/domains for email or SSO auth","items":{"type":"string"}},"authType":{"type":"string","description":"Authentication type: public, password, email, or sso","enum":["public","password","email","sso"],"default":"public"},"description":{"type":"string","description":"Optional description for the chat"},"identifier":{"type":"string","description":"URL slug for the chat (lowercase letters, numbers, hyphens only)"},"outputConfigs":{"type":"array","description":"Output configurations specifying which block outputs to display in chat","items":{"type":"object","properties":{"blockId":{"type":"string","description":"The block UUID"},"path":{"type":"string","description":"The output path (e.g. 
'response', 'response.content')"}},"required":["blockId","path"]}},"password":{"type":"string","description":"Password for password-protected chats"},"title":{"type":"string","description":"Display title for the chat interface"},"welcomeMessage":{"type":"string","description":"Welcome message shown to users"},"workflowId":{"type":"string","description":"Workflow ID to deploy (required in workspace context)"}}}, + resultSchema: {"type":"object","properties":{"action":{"type":"string","description":"Action performed by the tool, such as \"deploy\" or \"undeploy\"."},"apiEndpoint":{"type":"string","description":"Paired workflow execution endpoint used by the chat deployment."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"chatUrl":{"type":"string","description":"Shareable chat URL when the chat surface is deployed."},"deployedAt":{"type":"string","description":"Deployment timestamp for the underlying workflow deployment."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, including api and chat."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_chat this is always \"chat\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. 
Includes chat access details and API curl examples."},"identifier":{"type":"string","description":"Chat identifier or slug."},"isChatDeployed":{"type":"boolean","description":"Whether the chat surface is deployed after this tool call."},"isDeployed":{"type":"boolean","description":"Whether the paired API surface remains deployed after this tool call."},"success":{"type":"boolean","description":"Whether the deploy_chat action completed successfully."},"version":{"type":"number","description":"Deployment version for the underlying workflow deployment."},"workflowId":{"type":"string","description":"Workflow ID associated with the chat deployment."}},"required":["workflowId","success","action","isDeployed","isChatDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const DeployMcp: ToolCatalogEntry = { - id: 'deploy_mcp', - name: 'deploy_mcp', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - parameterDescriptions: { - type: 'array', - description: 'Array of parameter descriptions for the tool', - items: { - type: 'object', - properties: { - description: { type: 'string', description: 'Parameter description' }, - name: { type: 'string', description: 'Parameter name' }, - }, - required: ['name', 'description'], - }, - }, - serverId: { - type: 'string', - description: 'Required: server ID from list_workspace_mcp_servers', - }, - toolDescription: { type: 'string', description: 'Description for the MCP tool' }, - toolName: { - type: 'string', - description: 'Name for the MCP tool (defaults to workflow name)', - }, - workflowId: { type: 'string', description: 'Workflow ID (defaults to active workflow)' }, - }, - required: ['serverId'], - }, - resultSchema: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Action performed by the tool, such as "deploy" or "undeploy".', - }, - 
apiEndpoint: { - type: 'string', - description: 'Underlying workflow API endpoint associated with the MCP tool.', - }, - baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details.', - }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_mcp this is always "mcp".', - }, - examples: { - type: 'object', - description: - 'Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients.', - }, - mcpServerUrl: { type: 'string', description: 'HTTP MCP server URL to configure in clients.' }, - removed: { - type: 'boolean', - description: 'Whether the MCP deployment was removed during an undeploy action.', - }, - serverId: { type: 'string', description: 'Workspace MCP server ID.' }, - serverName: { type: 'string', description: 'Workspace MCP server name.' }, - toolDescription: { - type: 'string', - description: 'MCP tool description exposed on the server.', - }, - toolId: { type: 'string', description: 'MCP tool ID when deployed.' }, - toolName: { type: 'string', description: 'MCP tool name exposed on the server.' 
}, - updated: { - type: 'boolean', - description: 'Whether an existing MCP tool deployment was updated instead of created.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID associated with the MCP deployment.', - }, - }, - required: ['deploymentType', 'deploymentStatus'], - }, + id: "deploy_mcp", + name: "deploy_mcp", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"parameterDescriptions":{"type":"array","description":"Array of parameter descriptions for the tool","items":{"type":"object","properties":{"description":{"type":"string","description":"Parameter description"},"name":{"type":"string","description":"Parameter name"}},"required":["name","description"]}},"serverId":{"type":"string","description":"Required: server ID from list_workspace_mcp_servers"},"toolDescription":{"type":"string","description":"Description for the MCP tool"},"toolName":{"type":"string","description":"Name for the MCP tool (defaults to workflow name)"},"workflowId":{"type":"string","description":"Workflow ID (defaults to active workflow)"}},"required":["serverId"]}, + resultSchema: {"type":"object","properties":{"action":{"type":"string","description":"Action performed by the tool, such as \"deploy\" or \"undeploy\"."},"apiEndpoint":{"type":"string","description":"Underlying workflow API endpoint associated with the MCP tool."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. 
For deploy_mcp this is always \"mcp\"."},"examples":{"type":"object","description":"Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients."},"mcpServerUrl":{"type":"string","description":"HTTP MCP server URL to configure in clients."},"removed":{"type":"boolean","description":"Whether the MCP deployment was removed during an undeploy action."},"serverId":{"type":"string","description":"Workspace MCP server ID."},"serverName":{"type":"string","description":"Workspace MCP server name."},"toolDescription":{"type":"string","description":"MCP tool description exposed on the server."},"toolId":{"type":"string","description":"MCP tool ID when deployed."},"toolName":{"type":"string","description":"MCP tool name exposed on the server."},"updated":{"type":"boolean","description":"Whether an existing MCP tool deployment was updated instead of created."},"workflowId":{"type":"string","description":"Workflow ID associated with the MCP deployment."}},"required":["deploymentType","deploymentStatus"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const DownloadToWorkspaceFile: ToolCatalogEntry = { - id: 'download_to_workspace_file', - name: 'download_to_workspace_file', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - fileName: { - type: 'string', - description: - 'Optional workspace file name to save as. If omitted, the name is inferred from the response or URL.', - }, - url: { - type: 'string', - description: - 'Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg', - }, - }, - required: ['url'], - }, - requiredPermission: 'write', -} + id: "download_to_workspace_file", + name: "download_to_workspace_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"fileName":{"type":"string","description":"Optional workspace file name to save as. 
If omitted, the name is inferred from the response or URL."},"url":{"type":"string","description":"Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg"}},"required":["url"]}, + requiredPermission: "write", +}; export const EditContent: ToolCatalogEntry = { - id: 'edit_content', - name: 'edit_content', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: - 'The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text.', - }, - }, - required: ['content'], - }, - resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: - 'Optional operation metadata such as file id, file name, size, and content type.', - }, - message: { type: 'string', description: 'Human-readable summary of the outcome.' }, - success: { type: 'boolean', description: 'Whether the content was applied successfully.' }, - }, - required: ['success', 'message'], - }, - requiredPermission: 'write', -} + id: "edit_content", + name: "edit_content", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"content":{"type":"string","description":"The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. 
For patch with anchored: the insert/replacement text."}},"required":["content"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Optional operation metadata such as file id, file name, size, and content type."},"message":{"type":"string","description":"Human-readable summary of the outcome."},"success":{"type":"boolean","description":"Whether the content was applied successfully."}},"required":["success","message"]}, + requiredPermission: "write", +}; export const EditWorkflow: ToolCatalogEntry = { - id: 'edit_workflow', - name: 'edit_workflow', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - operations: { - type: 'array', - description: 'Array of edit operations', - items: { - type: 'object', - properties: { - block_id: { - type: 'string', - description: - 'Block ID for the operation. For add operations, this will be the desired ID for the new block.', - }, - operation_type: { - type: 'string', - description: 'Type of operation to perform', - enum: ['add', 'edit', 'delete', 'insert_into_subflow', 'extract_from_subflow'], - }, - params: { - type: 'object', - description: - 'Parameters for the operation. \nFor edit: {"inputs": {"temperature": 0.5}} NOT {"subBlocks": {"temperature": {"value": 0.5}}}\nFor add: {"type": "agent", "name": "My Agent", "inputs": {"model": "gpt-4o"}}\nFor delete: {} (empty object)', - }, - }, - required: ['operation_type', 'block_id', 'params'], - }, - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to edit. 
If not provided, uses the current workflow in context.', - }, - }, - required: ['operations'], - }, - requiredPermission: 'write', -} + id: "edit_workflow", + name: "edit_workflow", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"operations":{"type":"array","description":"Array of edit operations","items":{"type":"object","properties":{"block_id":{"type":"string","description":"Block ID for the operation. For add operations, this will be the desired ID for the new block."},"operation_type":{"type":"string","description":"Type of operation to perform","enum":["add","edit","delete","insert_into_subflow","extract_from_subflow"]},"params":{"type":"object","description":"Parameters for the operation. \nFor edit: {\"inputs\": {\"temperature\": 0.5}} NOT {\"subBlocks\": {\"temperature\": {\"value\": 0.5}}}\nFor add: {\"type\": \"agent\", \"name\": \"My Agent\", \"inputs\": {\"model\": \"gpt-4o\"}}\nFor delete: {} (empty object)"}},"required":["operation_type","block_id","params"]}},"workflowId":{"type":"string","description":"Optional workflow ID to edit. If not provided, uses the current workflow in context."}},"required":["operations"]}, + requiredPermission: "write", +}; export const File: ToolCatalogEntry = { - id: 'file', - name: 'file', - route: 'subagent', - mode: 'async', - parameters: { type: 'object' }, - subagentId: 'file', + id: "file", + name: "file", + route: "subagent", + mode: "async", + parameters: {"type":"object"}, + subagentId: "file", internal: true, -} +}; export const FunctionExecute: ToolCatalogEntry = { - id: 'function_execute', - name: 'function_execute', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. 
For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME.', - }, - inputFiles: { - type: 'array', - description: - 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: ["wf_123"]', - items: { type: 'string' }, - }, - inputTables: { - type: 'array', - description: - 'Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: ["tbl_abc123"]', - items: { type: 'string' }, - }, - language: { - type: 'string', - description: 'Execution language.', - enum: ['javascript', 'python', 'shell'], - }, - outputFormat: { - type: 'string', - description: - 'Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.', - enum: ['json', 'csv', 'txt', 'md', 'html'], - }, - outputMimeType: { - type: 'string', - description: - 'MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files.', - }, - outputPath: { - type: 'string', - description: - 'Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like "files/result.json" — nested paths are not supported.', - }, - outputSandboxPath: { - type: 'string', - description: - 'Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath.', - }, - outputTable: { - type: 'string', - description: - 'Table ID to overwrite with the code\'s return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. 
Example: "tbl_abc123"', - }, - }, - required: ['code'], - }, - requiredPermission: 'write', -} + id: "function_execute", + name: "function_execute", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"code":{"type":"string","description":"Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME."},"inputFiles":{"type":"array","description":"Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: [\"wf_123\"]","items":{"type":"string"}},"inputTables":{"type":"array","description":"Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: [\"tbl_abc123\"]","items":{"type":"string"}},"language":{"type":"string","description":"Execution language.","enum":["javascript","python","shell"]},"outputFormat":{"type":"string","description":"Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.","enum":["json","csv","txt","md","html"]},"outputMimeType":{"type":"string","description":"MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files."},"outputPath":{"type":"string","description":"Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like \"files/result.json\" — nested paths are not supported."},"outputSandboxPath":{"type":"string","description":"Path to a file created inside the sandbox that should be exported to the workspace. 
Use together with outputPath."},"outputTable":{"type":"string","description":"Table ID to overwrite with the code's return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. Example: \"tbl_abc123\""}},"required":["code"]}, + requiredPermission: "write", +}; export const GenerateApiKey: ToolCatalogEntry = { - id: 'generate_api_key', - name: 'generate_api_key', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - name: { - type: 'string', - description: "A descriptive name for the API key (e.g., 'production-key', 'dev-testing').", - }, - workspaceId: { - type: 'string', - description: "Optional workspace ID. Defaults to user's default workspace.", - }, - }, - required: ['name'], - }, + id: "generate_api_key", + name: "generate_api_key", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"name":{"type":"string","description":"A descriptive name for the API key (e.g., 'production-key', 'dev-testing')."},"workspaceId":{"type":"string","description":"Optional workspace ID. Defaults to user's default workspace."}},"required":["name"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const GenerateImage: ToolCatalogEntry = { - id: 'generate_image', - name: 'generate_image', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - aspectRatio: { - type: 'string', - description: 'Aspect ratio for the generated image.', - enum: ['1:1', '16:9', '9:16', '4:3', '3:4'], - }, - fileName: { - type: 'string', - description: - 'Output file name. Defaults to "generated-image.png". Workspace files are flat, so pass a plain file name, not a nested path.', - }, - overwriteFileId: { - type: 'string', - description: - 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. 
Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like "image (1).png". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', - }, - prompt: { - type: 'string', - description: - 'Detailed text description of the image to generate, or editing instructions when used with editFileId.', - }, - referenceFileIds: { - type: 'array', - description: - 'File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.', - items: { type: 'string' }, - }, - }, - required: ['prompt'], - }, - requiredPermission: 'write', -} + id: "generate_image", + name: "generate_image", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"aspectRatio":{"type":"string","description":"Aspect ratio for the generated image.","enum":["1:1","16:9","9:16","4:3","3:4"]},"fileName":{"type":"string","description":"Output file name. Defaults to \"generated-image.png\". Workspace files are flat, so pass a plain file name, not a nested path."},"overwriteFileId":{"type":"string","description":"If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like \"image (1).png\". 
The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")."},"prompt":{"type":"string","description":"Detailed text description of the image to generate, or editing instructions when used with editFileId."},"referenceFileIds":{"type":"array","description":"File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.","items":{"type":"string"}}},"required":["prompt"]}, + requiredPermission: "write", +}; export const GenerateVisualization: ToolCatalogEntry = { - id: 'generate_visualization', - name: 'generate_visualization', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output.", - }, - fileName: { - type: 'string', - description: - 'Output file name. Defaults to "chart.png". Workspace files are flat, so pass a plain file name, not a nested path.', - }, - inputFiles: { - type: 'array', - description: - 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}.', - items: { type: 'string' }, - }, - inputTables: { - type: 'array', - description: - "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. 
Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", - items: { type: 'string' }, - }, - overwriteFileId: { - type: 'string', - description: - 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like "chart (1).png". The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', - }, - }, - required: ['code'], - }, - requiredPermission: 'write', -} + id: "generate_visualization", + name: "generate_visualization", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"code":{"type":"string","description":"Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output."},"fileName":{"type":"string","description":"Output file name. Defaults to \"chart.png\". Workspace files are flat, so pass a plain file name, not a nested path."},"inputFiles":{"type":"array","description":"Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}.","items":{"type":"string"}},"inputTables":{"type":"array","description":"Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')","items":{"type":"string"}},"overwriteFileId":{"type":"string","description":"If provided, overwrites the existing workspace file with this ID instead of creating a new file. 
Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like \"chart (1).png\". The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")."}},"required":["code"]}, + requiredPermission: "write", +}; export const GetBlockOutputs: ToolCatalogEntry = { - id: 'get_block_outputs', - name: 'get_block_outputs', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - blockIds: { - type: 'array', - description: - 'Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.', - items: { type: 'string' }, - }, - workflowId: { - type: 'string', - description: 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - }, -} + id: "get_block_outputs", + name: "get_block_outputs", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"blockIds":{"type":"array","description":"Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.","items":{"type":"string"}},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}}}, +}; export const GetBlockUpstreamReferences: ToolCatalogEntry = { - id: 'get_block_upstream_references', - name: 'get_block_upstream_references', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - blockIds: { - type: 'array', - description: - 'Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.', - items: { type: 'string' }, - }, - workflowId: { - type: 'string', - description: 'Optional workflow ID. 
If not provided, uses the current workflow in context.', - }, - }, - required: ['blockIds'], - }, -} + id: "get_block_upstream_references", + name: "get_block_upstream_references", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"blockIds":{"type":"array","description":"Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.","items":{"type":"string"}},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}},"required":["blockIds"]}, +}; export const GetDeployedWorkflowState: ToolCatalogEntry = { - id: 'get_deployed_workflow_state', - name: 'get_deployed_workflow_state', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - }, -} + id: "get_deployed_workflow_state", + name: "get_deployed_workflow_state", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Optional workflow ID. 
If not provided, uses the current workflow in context."}}}, +}; export const GetDeploymentVersion: ToolCatalogEntry = { - id: 'get_deployment_version', - name: 'get_deployment_version', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - version: { type: 'number', description: 'The deployment version number' }, - workflowId: { type: 'string', description: 'The workflow ID' }, - }, - required: ['workflowId', 'version'], - }, -} + id: "get_deployment_version", + name: "get_deployment_version", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"version":{"type":"number","description":"The deployment version number"},"workflowId":{"type":"string","description":"The workflow ID"}},"required":["workflowId","version"]}, +}; export const GetExecutionSummary: ToolCatalogEntry = { - id: 'get_execution_summary', - name: 'get_execution_summary', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - limit: { - type: 'number', - description: 'Max number of executions to return (default: 10, max: 20).', - }, - status: { - type: 'string', - description: "Filter by status: 'success', 'error', or 'all' (default: 'all').", - enum: ['success', 'error', 'all'], - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If omitted, returns executions across all workflows in the workspace.', - }, - workspaceId: { type: 'string', description: 'Workspace ID to scope executions to.' }, - }, - required: ['workspaceId'], - }, -} + id: "get_execution_summary", + name: "get_execution_summary", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"limit":{"type":"number","description":"Max number of executions to return (default: 10, max: 20)."},"status":{"type":"string","description":"Filter by status: 'success', 'error', or 'all' (default: 'all').","enum":["success","error","all"]},"workflowId":{"type":"string","description":"Optional workflow ID. 
If omitted, returns executions across all workflows in the workspace."},"workspaceId":{"type":"string","description":"Workspace ID to scope executions to."}},"required":["workspaceId"]}, +}; export const GetJobLogs: ToolCatalogEntry = { - id: 'get_job_logs', - name: 'get_job_logs', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - executionId: { type: 'string', description: 'Optional execution ID for a specific run.' }, - includeDetails: { - type: 'boolean', - description: 'Include tool calls, outputs, and cost details.', - }, - jobId: { type: 'string', description: 'The job (schedule) ID to get logs for.' }, - limit: { type: 'number', description: 'Max number of entries (default: 3, max: 5)' }, - }, - required: ['jobId'], - }, -} + id: "get_job_logs", + name: "get_job_logs", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID for a specific run."},"includeDetails":{"type":"boolean","description":"Include tool calls, outputs, and cost details."},"jobId":{"type":"string","description":"The job (schedule) ID to get logs for."},"limit":{"type":"number","description":"Max number of entries (default: 3, max: 5)"}},"required":["jobId"]}, +}; export const GetPageContents: ToolCatalogEntry = { - id: 'get_page_contents', - name: 'get_page_contents', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - include_highlights: { - type: 'boolean', - description: 'Include key highlights (default false)', - }, - include_summary: { - type: 'boolean', - description: 'Include AI-generated summary (default false)', - }, - include_text: { type: 'boolean', description: 'Include full page text (default true)' }, - urls: { - type: 'array', - description: 'URLs to get content from (max 10)', - items: { type: 'string' }, - }, - }, - required: ['urls'], - }, -} + id: "get_page_contents", + name: "get_page_contents", + route: "go", + 
mode: "sync", + parameters: {"type":"object","properties":{"include_highlights":{"type":"boolean","description":"Include key highlights (default false)"},"include_summary":{"type":"boolean","description":"Include AI-generated summary (default false)"},"include_text":{"type":"boolean","description":"Include full page text (default true)"},"urls":{"type":"array","description":"URLs to get content from (max 10)","items":{"type":"string"}}},"required":["urls"]}, +}; export const GetPlatformActions: ToolCatalogEntry = { - id: 'get_platform_actions', - name: 'get_platform_actions', - route: 'sim', - mode: 'async', - parameters: { type: 'object', properties: {} }, -} + id: "get_platform_actions", + name: "get_platform_actions", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{}}, +}; export const GetWorkflowData: ToolCatalogEntry = { - id: 'get_workflow_data', - name: 'get_workflow_data', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - data_type: { - type: 'string', - description: 'The type of workflow data to retrieve', - enum: ['global_variables', 'custom_tools', 'mcp_tools', 'files'], - }, - workflowId: { - type: 'string', - description: 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - required: ['data_type'], - }, -} + id: "get_workflow_data", + name: "get_workflow_data", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"data_type":{"type":"string","description":"The type of workflow data to retrieve","enum":["global_variables","custom_tools","mcp_tools","files"]},"workflowId":{"type":"string","description":"Optional workflow ID. 
If not provided, uses the current workflow in context."}},"required":["data_type"]}, +}; export const GetWorkflowLogs: ToolCatalogEntry = { - id: 'get_workflow_logs', - name: 'get_workflow_logs', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - executionId: { - type: 'string', - description: - 'Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first.', - }, - includeDetails: { type: 'boolean', description: 'Include detailed info' }, - limit: { type: 'number', description: 'Max number of entries (hard limit: 3)' }, - workflowId: { - type: 'string', - description: 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - }, -} + id: "get_workflow_logs", + name: "get_workflow_logs", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first."},"includeDetails":{"type":"boolean","description":"Include detailed info"},"limit":{"type":"number","description":"Max number of entries (hard limit: 3)"},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}}}, +}; export const Glob: ToolCatalogEntry = { - id: 'glob', - name: 'glob', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - pattern: { - type: 'string', - description: - 'Glob pattern to match file paths. Supports * (any segment) and ** (any depth).', - }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "workflow configs" or "knowledge bases", not a full sentence like "Finding workflow configs".', - }, - }, - required: ['pattern', 'toolTitle'], - }, -} + id: "glob", + name: "glob", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"pattern":{"type":"string","description":"Glob pattern to match file paths. Supports * (any segment) and ** (any depth)."},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"workflow configs\" or \"knowledge bases\", not a full sentence like \"Finding workflow configs\"."}},"required":["pattern","toolTitle"]}, +}; export const Grep: ToolCatalogEntry = { - id: 'grep', - name: 'grep', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - context: { - type: 'number', - description: - "Number of lines to show before and after each match. Only applies to output_mode 'content'.", - }, - ignoreCase: { type: 'boolean', description: 'Case insensitive search (default false).' }, - lineNumbers: { - type: 'boolean', - description: - "Include line numbers in output (default true). Only applies to output_mode 'content'.", - }, - maxResults: { - type: 'number', - description: 'Maximum number of matches to return (default 50).', - }, - output_mode: { - type: 'string', - description: - "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", - enum: ['content', 'files_with_matches', 'count'], - }, - path: { - type: 'string', - description: - "Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/').", - }, - pattern: { type: 'string', description: 'Regex pattern to search for in file contents.' }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "Slack integrations" or "deployed workflows", not a full sentence like "Searching for Slack integrations".', - }, - }, - required: ['pattern', 'toolTitle'], - }, -} + id: "grep", + name: "grep", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"context":{"type":"number","description":"Number of lines to show before and after each match. Only applies to output_mode 'content'."},"ignoreCase":{"type":"boolean","description":"Case insensitive search (default false)."},"lineNumbers":{"type":"boolean","description":"Include line numbers in output (default true). Only applies to output_mode 'content'."},"maxResults":{"type":"number","description":"Maximum number of matches to return (default 50)."},"output_mode":{"type":"string","description":"Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.","enum":["content","files_with_matches","count"]},"path":{"type":"string","description":"Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/')."},"pattern":{"type":"string","description":"Regex pattern to search for in file contents."},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like \"Slack integrations\" or \"deployed workflows\", not a full sentence like \"Searching for Slack integrations\"."}},"required":["pattern","toolTitle"]}, +}; export const Job: ToolCatalogEntry = { - id: 'job', - name: 'job', - route: 'subagent', - mode: 'async', - parameters: { - properties: { request: { description: 'What job action is needed.', type: 'string' } }, - required: ['request'], - type: 'object', - }, - subagentId: 'job', + id: "job", + name: "job", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"What job action is needed.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "job", internal: true, -} +}; export const Knowledge: ToolCatalogEntry = { - id: 'knowledge', - name: 'knowledge', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - request: { description: 'What knowledge base action is needed.', type: 'string' }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'knowledge', + id: "knowledge", + name: "knowledge", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"What knowledge base action is needed.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "knowledge", internal: true, -} +}; export const KnowledgeBase: ToolCatalogEntry = { - id: 'knowledge_base', - name: 'knowledge_base', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: 'Arguments for the operation', - properties: { - apiKey: { - type: 'string', - description: - 'API key for API-key-based connectors (required when connector auth mode is apiKey)', - }, - chunkingConfig: { - type: 'object', - description: "Chunking configuration (optional for 'create')", - properties: { - maxSize: { - type: 'number', - description: 'Maximum chunk size (100-4000, default: 1024)', - default: 1024, - }, - minSize: { 
- type: 'number', - description: 'Minimum chunk size (1-2000, default: 1)', - default: 1, - }, - overlap: { - type: 'number', - description: 'Overlap between chunks (0-500, default: 200)', - default: 200, - }, - }, - }, - connectorId: { - type: 'string', - description: - 'Connector ID (required for update_connector, delete_connector, sync_connector)', - }, - connectorStatus: { - type: 'string', - description: 'Connector status (optional for update_connector)', - enum: ['active', 'paused'], - }, - connectorType: { - type: 'string', - description: - "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema.", - }, - credentialId: { - type: 'string', - description: - 'OAuth credential ID from environment/credentials.json (required for OAuth connectors)', - }, - description: { - type: 'string', - description: "Description of the knowledge base (optional for 'create')", - }, - disabledTagIds: { - type: 'array', - description: - 'Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema.', - }, - documentId: { type: 'string', description: 'Document ID (required for update_document)' }, - documentIds: { - type: 'array', - description: 'Document IDs (for batch delete_document)', - items: { type: 'string' }, - }, - enabled: { - type: 'boolean', - description: 'Enable/disable a document (optional for update_document)', - }, - fileIds: { - type: 'array', - description: - 'Canonical workspace file IDs to add as documents (for add_file). 
Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', - items: { type: 'string' }, - }, - filename: { - type: 'string', - description: 'New filename for a document (optional for update_document)', - }, - knowledgeBaseId: { - type: 'string', - description: - 'Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)', - }, - knowledgeBaseIds: { - type: 'array', - description: 'Knowledge base IDs (for batch delete)', - items: { type: 'string' }, - }, - name: { - type: 'string', - description: "Name of the knowledge base (required for 'create')", - }, - query: { type: 'string', description: "Search query text (required for 'query')" }, - sourceConfig: { - type: 'object', - description: - 'Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json', - }, - syncIntervalMinutes: { - type: 'number', - description: - 'Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440', - default: 1440, - }, - tagDefinitionId: { - type: 'string', - description: 'Tag definition ID (required for update_tag, delete_tag)', - }, - tagDisplayName: { - type: 'string', - description: - 'Display name for the tag (required for create_tag, optional for update_tag)', - }, - tagFieldType: { - type: 'string', - description: - 'Field type: text, number, date, boolean (optional for create_tag, defaults to text)', - enum: ['text', 'number', 'date', 'boolean'], - }, - topK: { - type: 'number', - description: 'Number of results to return (1-50, default: 5)', - default: 5, - }, - workspaceId: { - type: 'string', - description: "Workspace ID (required for 'create', optional filter for 'list')", - }, - }, - }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: [ - 'create', - 'get', - 'query', - 'add_file', - 'update', - 'delete', - 'delete_document', - 'update_document', - 'list_tags', - 'create_tag', - 'update_tag', - 'delete_tag', - 'get_tag_usage', - 'add_connector', - 'update_connector', - 'delete_connector', - 'sync_connector', - ], - }, - }, - required: ['operation', 'args'], - }, - resultSchema: { - type: 'object', - properties: { - data: { type: 'object', description: 'Operation-specific result payload.' }, - message: { type: 'string', description: 'Human-readable outcome summary.' }, - success: { type: 'boolean', description: 'Whether the operation succeeded.' 
}, - }, - required: ['success', 'message'], - }, + id: "knowledge_base", + name: "knowledge_base", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"apiKey":{"type":"string","description":"API key for API-key-based connectors (required when connector auth mode is apiKey)"},"chunkingConfig":{"type":"object","description":"Chunking configuration (optional for 'create')","properties":{"maxSize":{"type":"number","description":"Maximum chunk size (100-4000, default: 1024)","default":1024},"minSize":{"type":"number","description":"Minimum chunk size (1-2000, default: 1)","default":1},"overlap":{"type":"number","description":"Overlap between chunks (0-500, default: 200)","default":200}}},"connectorId":{"type":"string","description":"Connector ID (required for update_connector, delete_connector, sync_connector)"},"connectorStatus":{"type":"string","description":"Connector status (optional for update_connector)","enum":["active","paused"]},"connectorType":{"type":"string","description":"Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema."},"credentialId":{"type":"string","description":"OAuth credential ID from environment/credentials.json (required for OAuth connectors)"},"description":{"type":"string","description":"Description of the knowledge base (optional for 'create')"},"disabledTagIds":{"type":"array","description":"Tag definition IDs to opt out of (optional for add_connector). 
See tagDefinitions in the connector schema."},"documentId":{"type":"string","description":"Document ID (required for update_document)"},"documentIds":{"type":"array","description":"Document IDs (for batch delete_document)","items":{"type":"string"}},"enabled":{"type":"boolean","description":"Enable/disable a document (optional for update_document)"},"fileIds":{"type":"array","description":"Canonical workspace file IDs to add as documents (for add_file). Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\").","items":{"type":"string"}},"filename":{"type":"string","description":"New filename for a document (optional for update_document)"},"knowledgeBaseId":{"type":"string","description":"Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)"},"knowledgeBaseIds":{"type":"array","description":"Knowledge base IDs (for batch delete)","items":{"type":"string"}},"name":{"type":"string","description":"Name of the knowledge base (required for 'create')"},"query":{"type":"string","description":"Search query text (required for 'query')"},"sourceConfig":{"type":"object","description":"Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json"},"syncIntervalMinutes":{"type":"number","description":"Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440","default":1440},"tagDefinitionId":{"type":"string","description":"Tag definition ID (required for update_tag, delete_tag)"},"tagDisplayName":{"type":"string","description":"Display name for the tag (required for create_tag, optional for update_tag)"},"tagFieldType":{"type":"string","description":"Field type: text, number, date, boolean (optional for create_tag, defaults to text)","enum":["text","number","date","boolean"]},"topK":{"type":"number","description":"Number of results to return (1-50, default: 5)","default":5},"workspaceId":{"type":"string","description":"Workspace ID (required for 'create', optional filter for 'list')"}}},"operation":{"type":"string","description":"The operation to perform","enum":["create","get","query","add_file","update","delete","delete_document","update_document","list_tags","create_tag","update_tag","delete_tag","get_tag_usage","add_connector","update_connector","delete_connector","sync_connector"]}},"required":["operation","args"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Operation-specific result payload."},"message":{"type":"string","description":"Human-readable outcome summary."},"success":{"type":"boolean","description":"Whether the operation succeeded."}},"required":["success","message"]}, requiresConfirmation: true, -} +}; export const ListFolders: ToolCatalogEntry = { - id: 'list_folders', - name: 'list_folders', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workspaceId: { type: 'string', description: 'Optional workspace ID to list folders for.' 
}, - }, - }, -} + id: "list_folders", + name: "list_folders", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workspaceId":{"type":"string","description":"Optional workspace ID to list folders for."}}}, +}; export const ListUserWorkspaces: ToolCatalogEntry = { - id: 'list_user_workspaces', - name: 'list_user_workspaces', - route: 'sim', - mode: 'async', - parameters: { type: 'object', properties: {} }, -} + id: "list_user_workspaces", + name: "list_user_workspaces", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{}}, +}; export const ListWorkspaceMcpServers: ToolCatalogEntry = { - id: 'list_workspace_mcp_servers', - name: 'list_workspace_mcp_servers', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workspaceId: { type: 'string', description: 'Workspace ID (defaults to current workspace)' }, - }, - }, -} + id: "list_workspace_mcp_servers", + name: "list_workspace_mcp_servers", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workspaceId":{"type":"string","description":"Workspace ID (defaults to current workspace)"}}}, +}; export const ManageCredential: ToolCatalogEntry = { - id: 'manage_credential', - name: 'manage_credential', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - credentialId: { type: 'string', description: 'The credential ID (required for rename)' }, - credentialIds: { - type: 'array', - description: 'Array of credential IDs (for batch delete)', - items: { type: 'string' }, - }, - displayName: { type: 'string', description: 'New display name (required for rename)' }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: ['rename', 'delete'], - }, - }, - required: ['operation'], - }, + id: "manage_credential", + name: "manage_credential", + route: "sim", + mode: "async", + parameters: 
{"type":"object","properties":{"credentialId":{"type":"string","description":"The credential ID (required for rename)"},"credentialIds":{"type":"array","description":"Array of credential IDs (for batch delete)","items":{"type":"string"}},"displayName":{"type":"string","description":"New display name (required for rename)"},"operation":{"type":"string","description":"The operation to perform","enum":["rename","delete"]}},"required":["operation"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const ManageCustomTool: ToolCatalogEntry = { - id: 'manage_custom_tool', - name: 'manage_custom_tool', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces.', - }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], - }, - schema: { - type: 'object', - description: 'The tool schema in OpenAI function calling format (required for add).', - properties: { - function: { - type: 'object', - description: 'The function definition', - properties: { - description: { type: 'string', description: 'What the function does' }, - name: { type: 'string', description: 'The function name (camelCase)' }, - parameters: { - type: 'object', - description: 'The function parameters schema', - properties: { - properties: { - type: 'object', - description: 'Parameter definitions as key-value pairs', - }, - required: { - type: 'array', - description: 'Array of required parameter names', - items: { type: 'string' }, - }, - type: { type: 'string', description: "Must be 'object'" }, - }, - required: ['type', 'properties'], - }, - }, - required: ['name', 'parameters'], - }, - type: { type: 
'string', description: "Must be 'function'" }, - }, - required: ['type', 'function'], - }, - toolId: { - type: 'string', - description: - "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'.", - }, - toolIds: { - type: 'array', - description: 'Array of custom tool IDs (for batch delete)', - items: { type: 'string' }, - }, - }, - required: ['operation'], - }, + id: "manage_custom_tool", + name: "manage_custom_tool", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"code":{"type":"string","description":"The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces."},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"schema":{"type":"object","description":"The tool schema in OpenAI function calling format (required for add).","properties":{"function":{"type":"object","description":"The function definition","properties":{"description":{"type":"string","description":"What the function does"},"name":{"type":"string","description":"The function name (camelCase)"},"parameters":{"type":"object","description":"The function parameters schema","properties":{"properties":{"type":"object","description":"Parameter definitions as key-value pairs"},"required":{"type":"array","description":"Array of required parameter names","items":{"type":"string"}},"type":{"type":"string","description":"Must be 'object'"}},"required":["type","properties"]}},"required":["name","parameters"]},"type":{"type":"string","description":"Must be 'function'"}},"required":["type","function"]},"toolId":{"type":"string","description":"The ID of the custom tool (required for edit). 
Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'."},"toolIds":{"type":"array","description":"Array of custom tool IDs (for batch delete)","items":{"type":"string"}}},"required":["operation"]}, requiresConfirmation: true, -} +}; export const ManageJob: ToolCatalogEntry = { - id: 'manage_job', - name: 'manage_job', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: - 'Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. For list: no args needed.', - properties: { - cron: { type: 'string', description: 'Cron expression for recurring jobs' }, - jobId: { type: 'string', description: 'Job ID (required for get, update)' }, - jobIds: { - type: 'array', - description: 'Array of job IDs (for batch delete)', - items: { type: 'string' }, - }, - lifecycle: { - type: 'string', - description: - "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called.", - }, - maxRuns: { - type: 'integer', - description: 'Max executions before auto-completing. Safety limit.', - }, - prompt: { type: 'string', description: 'The prompt to execute when the job fires' }, - status: { type: 'string', description: 'Job status: active, paused' }, - successCondition: { - type: 'string', - description: - 'What must happen for the job to be considered complete (until_complete lifecycle).', - }, - time: { - type: 'string', - description: 'ISO 8601 datetime for one-time jobs or cron start time', - }, - timezone: { - type: 'string', - description: 'IANA timezone (e.g. America/New_York). 
Defaults to UTC.', - }, - title: { - type: 'string', - description: "Short descriptive title for the job (e.g. 'Email Poller')", - }, - }, - }, - operation: { - type: 'string', - description: 'The operation to perform: create, list, get, update, delete', - enum: ['create', 'list', 'get', 'update', 'delete'], - }, - }, - required: ['operation'], - }, -} + id: "manage_job", + name: "manage_job", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"args":{"type":"object","description":"Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. For list: no args needed.","properties":{"cron":{"type":"string","description":"Cron expression for recurring jobs"},"jobId":{"type":"string","description":"Job ID (required for get, update)"},"jobIds":{"type":"array","description":"Array of job IDs (for batch delete)","items":{"type":"string"}},"lifecycle":{"type":"string","description":"'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called."},"maxRuns":{"type":"integer","description":"Max executions before auto-completing. Safety limit."},"prompt":{"type":"string","description":"The prompt to execute when the job fires"},"status":{"type":"string","description":"Job status: active, paused"},"successCondition":{"type":"string","description":"What must happen for the job to be considered complete (until_complete lifecycle)."},"time":{"type":"string","description":"ISO 8601 datetime for one-time jobs or cron start time"},"timezone":{"type":"string","description":"IANA timezone (e.g. America/New_York). Defaults to UTC."},"title":{"type":"string","description":"Short descriptive title for the job (e.g. 
'Email Poller')"}}},"operation":{"type":"string","description":"The operation to perform: create, list, get, update, delete","enum":["create","list","get","update","delete"]}},"required":["operation"]}, +}; export const ManageMcpTool: ToolCatalogEntry = { - id: 'manage_mcp_tool', - name: 'manage_mcp_tool', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - config: { - type: 'object', - description: 'Required for add and edit. The MCP server configuration.', - properties: { - enabled: { - type: 'boolean', - description: 'Whether the server is enabled (default: true)', - }, - headers: { - type: 'object', - description: 'Optional HTTP headers to send with requests (key-value pairs)', - }, - name: { type: 'string', description: 'Display name for the MCP server' }, - timeout: { - type: 'number', - description: 'Request timeout in milliseconds (default: 30000)', - }, - transport: { - type: 'string', - description: "Transport protocol: 'streamable-http' or 'sse'", - enum: ['streamable-http', 'sse'], - default: 'streamable-http', - }, - url: { type: 'string', description: 'The MCP server endpoint URL (required for add)' }, - }, - }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], - }, - serverId: { - type: 'string', - description: - "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'.", - }, - }, - required: ['operation'], - }, + id: "manage_mcp_tool", + name: "manage_mcp_tool", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"config":{"type":"object","description":"Required for add and edit. 
The MCP server configuration.","properties":{"enabled":{"type":"boolean","description":"Whether the server is enabled (default: true)"},"headers":{"type":"object","description":"Optional HTTP headers to send with requests (key-value pairs)"},"name":{"type":"string","description":"Display name for the MCP server"},"timeout":{"type":"number","description":"Request timeout in milliseconds (default: 30000)"},"transport":{"type":"string","description":"Transport protocol: 'streamable-http' or 'sse'","enum":["streamable-http","sse"],"default":"streamable-http"},"url":{"type":"string","description":"The MCP server endpoint URL (required for add)"}}},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"serverId":{"type":"string","description":"Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'."}},"required":["operation"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const ManageSkill: ToolCatalogEntry = { - id: 'manage_skill', - name: 'manage_skill', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: 'Markdown instructions for the skill. Required for add, optional for edit.', - }, - description: { - type: 'string', - description: 'Short description of the skill. Required for add, optional for edit.', - }, - name: { - type: 'string', - description: - "Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit.", - }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], - }, - skillId: { - type: 'string', - description: - "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. 
DO NOT PROVIDE if operation is 'add' or 'list'.", - }, - }, - required: ['operation'], - }, + id: "manage_skill", + name: "manage_skill", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"content":{"type":"string","description":"Markdown instructions for the skill. Required for add, optional for edit."},"description":{"type":"string","description":"Short description of the skill. Required for add, optional for edit."},"name":{"type":"string","description":"Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit."},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"skillId":{"type":"string","description":"The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. DO NOT PROVIDE if operation is 'add' or 'list'."}},"required":["operation"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const MaterializeFile: ToolCatalogEntry = { - id: 'materialize_file', - name: 'materialize_file', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - fileNames: { - type: 'array', - description: - 'The names of the uploaded files to materialize (e.g. ["report.pdf", "data.csv"])', - items: { type: 'string' }, - }, - knowledgeBaseId: { - type: 'string', - description: - 'ID of an existing knowledge base to add the file to (only used with operation "knowledge_base"). If omitted, a new KB is created.', - }, - operation: { - type: 'string', - description: - 'What to do with the file. "save" promotes it to files/. "import" imports a workflow JSON. "table" converts CSV/TSV/JSON to a table. "knowledge_base" saves and adds to a KB. 
Defaults to "save".', - enum: ['save', 'import', 'table', 'knowledge_base'], - default: 'save', - }, - tableName: { - type: 'string', - description: - 'Custom name for the table (only used with operation "table"). Defaults to the file name without extension.', - }, - }, - required: ['fileNames'], - }, - requiredPermission: 'write', -} + id: "materialize_file", + name: "materialize_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"fileNames":{"type":"array","description":"The names of the uploaded files to materialize (e.g. [\"report.pdf\", \"data.csv\"])","items":{"type":"string"}},"knowledgeBaseId":{"type":"string","description":"ID of an existing knowledge base to add the file to (only used with operation \"knowledge_base\"). If omitted, a new KB is created."},"operation":{"type":"string","description":"What to do with the file. \"save\" promotes it to files/. \"import\" imports a workflow JSON. \"table\" converts CSV/TSV/JSON to a table. \"knowledge_base\" saves and adds to a KB. Defaults to \"save\".","enum":["save","import","table","knowledge_base"],"default":"save"},"tableName":{"type":"string","description":"Custom name for the table (only used with operation \"table\"). Defaults to the file name without extension."}},"required":["fileNames"]}, + requiredPermission: "write", +}; export const MoveFolder: ToolCatalogEntry = { - id: 'move_folder', - name: 'move_folder', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - folderId: { type: 'string', description: 'The folder ID to move.' }, - parentId: { - type: 'string', - description: - 'Target parent folder ID. 
Omit or pass empty string to move to workspace root.', - }, - }, - required: ['folderId'], - }, - requiredPermission: 'write', -} + id: "move_folder", + name: "move_folder", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"folderId":{"type":"string","description":"The folder ID to move."},"parentId":{"type":"string","description":"Target parent folder ID. Omit or pass empty string to move to workspace root."}},"required":["folderId"]}, + requiredPermission: "write", +}; export const MoveWorkflow: ToolCatalogEntry = { - id: 'move_workflow', - name: 'move_workflow', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - folderId: { - type: 'string', - description: 'Target folder ID. Omit or pass empty string to move to workspace root.', - }, - workflowIds: { - type: 'array', - description: 'The workflow IDs to move.', - items: { type: 'string' }, - }, - }, - required: ['workflowIds'], - }, - requiredPermission: 'write', -} + id: "move_workflow", + name: "move_workflow", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"folderId":{"type":"string","description":"Target folder ID. 
Omit or pass empty string to move to workspace root."},"workflowIds":{"type":"array","description":"The workflow IDs to move.","items":{"type":"string"}}},"required":["workflowIds"]}, + requiredPermission: "write", +}; export const OauthGetAuthLink: ToolCatalogEntry = { - id: 'oauth_get_auth_link', - name: 'oauth_get_auth_link', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - providerName: { - type: 'string', - description: - "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')", - }, - }, - required: ['providerName'], - }, -} + id: "oauth_get_auth_link", + name: "oauth_get_auth_link", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"providerName":{"type":"string","description":"The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')"}},"required":["providerName"]}, +}; export const OauthRequestAccess: ToolCatalogEntry = { - id: 'oauth_request_access', - name: 'oauth_request_access', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - providerName: { - type: 'string', - description: - "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')", - }, - }, - required: ['providerName'], - }, + id: "oauth_request_access", + name: "oauth_request_access", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"providerName":{"type":"string","description":"The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')"}},"required":["providerName"]}, requiresConfirmation: true, -} +}; export const OpenResource: ToolCatalogEntry = { - id: 'open_resource', - name: 'open_resource', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - resources: { - type: 'array', - description: 'Array of resources to open. 
Each item must have type and id.', - items: { - type: 'object', - properties: { - id: { type: 'string', description: 'The resource ID.' }, - type: { - type: 'string', - description: 'The resource type.', - enum: ['workflow', 'table', 'knowledgebase', 'file', 'log'], - }, - }, - required: ['type', 'id'], - }, - }, - }, - required: ['resources'], - }, -} + id: "open_resource", + name: "open_resource", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"resources":{"type":"array","description":"Array of resources to open. Each item must have type and id.","items":{"type":"object","properties":{"id":{"type":"string","description":"The resource ID."},"type":{"type":"string","description":"The resource type.","enum":["workflow","table","knowledgebase","file","log"]}},"required":["type","id"]}}},"required":["resources"]}, +}; export const Read: ToolCatalogEntry = { - id: 'read', - name: 'read', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - limit: { type: 'number', description: 'Maximum number of lines to read.' }, - offset: { type: 'number', description: 'Line offset to start reading from (0-indexed).' }, - outputTable: { - type: 'string', - description: - 'Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: "tbl_abc123"', - }, - path: { - type: 'string', - description: - "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", - }, - }, - required: ['path'], - }, -} + id: "read", + name: "read", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"limit":{"type":"number","description":"Maximum number of lines to read."},"offset":{"type":"number","description":"Line offset to start reading from (0-indexed)."},"outputTable":{"type":"string","description":"Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. 
Example: \"tbl_abc123\""},"path":{"type":"string","description":"Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json')."}},"required":["path"]}, +}; export const Redeploy: ToolCatalogEntry = { - id: 'redeploy', - name: 'redeploy', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: 'Workflow ID to redeploy (required in workspace context)', - }, - }, - }, - resultSchema: { - type: 'object', - properties: { - apiEndpoint: { type: 'string', description: 'Canonical workflow execution endpoint.' }, - baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp when the workflow is deployed.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', - }, - deploymentStatus: { - type: 'object', - description: 'Structured per-surface deployment status keyed by surface name, such as api.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the workflow API is currently deployed after this tool call.', - }, - version: { - type: 'number', - description: 'Deployment version for the current API deployment.', - }, - workflowId: { type: 'string', description: 'Workflow ID that was deployed or undeployed.' 
}, - }, - required: [ - 'workflowId', - 'isDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, + id: "redeploy", + name: "redeploy", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Workflow ID to redeploy (required in workspace context)"}}}, + resultSchema: {"type":"object","properties":{"apiEndpoint":{"type":"string","description":"Canonical workflow execution endpoint."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deployedAt":{"type":"string","description":"Deployment timestamp when the workflow is deployed."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, such as api."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling."},"isDeployed":{"type":"boolean","description":"Whether the workflow API is currently deployed after this tool call."},"version":{"type":"number","description":"Deployment version for the current API deployment."},"workflowId":{"type":"string","description":"Workflow ID that was deployed or undeployed."}},"required":["workflowId","isDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const RenameFile: ToolCatalogEntry = { - id: 'rename_file', - name: 'rename_file', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - fileId: { type: 'string', description: 'Canonical workspace file ID of the file to rename.' }, - newName: { - type: 'string', - description: - 'New filename including extension, e.g. "draft_v2.md". Must not contain slashes.', - }, - }, - required: ['fileId', 'newName'], - }, - resultSchema: { - type: 'object', - properties: { - data: { type: 'object', description: 'Contains id and the new name.' }, - message: { type: 'string', description: 'Human-readable outcome.' }, - success: { type: 'boolean', description: 'Whether the rename succeeded.' }, - }, - required: ['success', 'message'], - }, - requiredPermission: 'write', -} + id: "rename_file", + name: "rename_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"fileId":{"type":"string","description":"Canonical workspace file ID of the file to rename."},"newName":{"type":"string","description":"New filename including extension, e.g. \"draft_v2.md\". 
Must not contain slashes."}},"required":["fileId","newName"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Contains id and the new name."},"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the rename succeeded."}},"required":["success","message"]}, + requiredPermission: "write", +}; export const RenameWorkflow: ToolCatalogEntry = { - id: 'rename_workflow', - name: 'rename_workflow', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - name: { type: 'string', description: 'The new name for the workflow.' }, - workflowId: { type: 'string', description: 'The workflow ID to rename.' }, - }, - required: ['workflowId', 'name'], - }, - requiredPermission: 'write', -} + id: "rename_workflow", + name: "rename_workflow", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"name":{"type":"string","description":"The new name for the workflow."},"workflowId":{"type":"string","description":"The workflow ID to rename."}},"required":["workflowId","name"]}, + requiredPermission: "write", +}; export const Research: ToolCatalogEntry = { - id: 'research', - name: 'research', - route: 'subagent', - mode: 'async', - parameters: { - properties: { topic: { description: 'The topic to research.', type: 'string' } }, - required: ['topic'], - type: 'object', - }, - subagentId: 'research', + id: "research", + name: "research", + route: "subagent", + mode: "async", + parameters: {"properties":{"topic":{"description":"The topic to research.","type":"string"}},"required":["topic"],"type":"object"}, + subagentId: "research", internal: true, -} +}; export const Respond: ToolCatalogEntry = { - id: 'respond', - name: 'respond', - route: 'sim', - mode: 'async', - parameters: { - additionalProperties: true, - properties: { - output: { - description: - 'The result — facts, status, VFS paths to persisted data, whatever the caller needs to 
act on.', - type: 'string', - }, - success: { description: 'Whether the task completed successfully', type: 'boolean' }, - type: { description: 'Optional logical result type override', type: 'string' }, - }, - required: ['output', 'success'], - type: 'object', - }, + id: "respond", + name: "respond", + route: "sim", + mode: "async", + parameters: {"additionalProperties":true,"properties":{"output":{"description":"The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.","type":"string"},"success":{"description":"Whether the task completed successfully","type":"boolean"},"type":{"description":"Optional logical result type override","type":"string"}},"required":["output","success"],"type":"object"}, internal: true, hidden: true, -} +}; export const RestoreResource: ToolCatalogEntry = { - id: 'restore_resource', - name: 'restore_resource', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - id: { type: 'string', description: 'The canonical resource ID to restore.' 
}, - type: { - type: 'string', - description: 'The resource type to restore.', - enum: ['workflow', 'table', 'file', 'knowledgebase', 'folder'], - }, - }, - required: ['type', 'id'], - }, + id: "restore_resource", + name: "restore_resource", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"id":{"type":"string","description":"The canonical resource ID to restore."},"type":{"type":"string","description":"The resource type to restore.","enum":["workflow","table","file","knowledgebase","folder"]}},"required":["type","id"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const RevertToVersion: ToolCatalogEntry = { - id: 'revert_to_version', - name: 'revert_to_version', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - version: { type: 'number', description: 'The deployment version number to revert to' }, - workflowId: { type: 'string', description: 'The workflow ID' }, - }, - required: ['workflowId', 'version'], - }, + id: "revert_to_version", + name: "revert_to_version", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"version":{"type":"number","description":"The deployment version number to revert to"},"workflowId":{"type":"string","description":"The workflow ID"}},"required":["workflowId","version"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const Run: ToolCatalogEntry = { - id: 'run', - name: 'run', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - context: { - description: 'Pre-gathered context: workflow state, block IDs, input requirements.', - type: 'string', - }, - request: { description: 'What to run or what logs to check.', type: 'string' }, - }, - required: ['request'], - type: 'object', - }, - subagentId: 'run', + id: "run", + name: "run", + route: "subagent", + mode: "async", + parameters: 
{"properties":{"context":{"description":"Pre-gathered context: workflow state, block IDs, input requirements.","type":"string"},"request":{"description":"What to run or what logs to check.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "run", internal: true, -} +}; export const RunBlock: ToolCatalogEntry = { - id: 'run_block', - name: 'run_block', - route: 'client', - mode: 'async', - parameters: { - type: 'object', - properties: { - blockId: { type: 'string', description: 'The block ID to run in isolation.' }, - executionId: { - type: 'string', - description: - 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', - }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', - }, - }, - required: ['blockId'], - }, + id: "run_block", + name: "run_block", + route: "client", + mode: "async", + parameters: {"type":"object","properties":{"blockId":{"type":"string","description":"The block ID to run in isolation."},"executionId":{"type":"string","description":"Optional execution ID to load the snapshot from. Uses latest execution if omitted."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. 
If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["blockId"]}, clientExecutable: true, requiresConfirmation: true, -} +}; export const RunFromBlock: ToolCatalogEntry = { - id: 'run_from_block', - name: 'run_from_block', - route: 'client', - mode: 'async', - parameters: { - type: 'object', - properties: { - executionId: { - type: 'string', - description: - 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', - }, - startBlockId: { type: 'string', description: 'The block ID to start execution from.' }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', - }, - }, - required: ['startBlockId'], - }, + id: "run_from_block", + name: "run_from_block", + route: "client", + mode: "async", + parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID to load the snapshot from. Uses latest execution if omitted."},"startBlockId":{"type":"string","description":"The block ID to start execution from."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. 
If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["startBlockId"]}, clientExecutable: true, requiresConfirmation: true, -} +}; export const RunWorkflow: ToolCatalogEntry = { - id: 'run_workflow', - name: 'run_workflow', - route: 'client', - mode: 'async', - parameters: { - type: 'object', - properties: { - triggerBlockId: { - type: 'string', - description: - 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', - }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', - }, - }, - required: ['workflow_input'], - }, + id: "run_workflow", + name: "run_workflow", + route: "client", + mode: "async", + parameters: {"type":"object","properties":{"triggerBlockId":{"type":"string","description":"Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. 
If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["workflow_input"]}, clientExecutable: true, requiresConfirmation: true, -} +}; export const RunWorkflowUntilBlock: ToolCatalogEntry = { - id: 'run_workflow_until_block', - name: 'run_workflow_until_block', - route: 'client', - mode: 'async', - parameters: { - type: 'object', - properties: { - stopAfterBlockId: { - type: 'string', - description: 'The block ID to stop after. Execution halts once this block completes.', - }, - triggerBlockId: { - type: 'string', - description: - 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', - }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', - }, - }, - required: ['stopAfterBlockId'], - }, + id: "run_workflow_until_block", + name: "run_workflow_until_block", + route: "client", + mode: "async", + parameters: {"type":"object","properties":{"stopAfterBlockId":{"type":"string","description":"The block ID to stop after. Execution halts once this block completes."},"triggerBlockId":{"type":"string","description":"Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. 
If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["stopAfterBlockId"]}, clientExecutable: true, requiresConfirmation: true, -} +}; export const ScrapePage: ToolCatalogEntry = { - id: 'scrape_page', - name: 'scrape_page', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - include_links: { - type: 'boolean', - description: 'Extract all links from the page (default false)', - }, - url: { type: 'string', description: 'The URL to scrape (must include https://)' }, - wait_for: { - type: 'string', - description: 'CSS selector to wait for before scraping (for JS-heavy pages)', - }, - }, - required: ['url'], - }, -} + id: "scrape_page", + name: "scrape_page", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"include_links":{"type":"boolean","description":"Extract all links from the page (default false)"},"url":{"type":"string","description":"The URL to scrape (must include https://)"},"wait_for":{"type":"string","description":"CSS selector to wait for before scraping (for JS-heavy pages)"}},"required":["url"]}, +}; export const SearchDocumentation: ToolCatalogEntry = { - id: 'search_documentation', - name: 'search_documentation', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - query: { type: 'string', description: 'The search query' }, - topK: { type: 'number', description: 'Number of results (max 10)' }, - }, - required: ['query'], - }, -} + id: "search_documentation", + name: "search_documentation", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"query":{"type":"string","description":"The search query"},"topK":{"type":"number","description":"Number of results (max 10)"}},"required":["query"]}, +}; export const SearchLibraryDocs: ToolCatalogEntry = { - id: 'search_library_docs', - name: 
'search_library_docs', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - library_name: { - type: 'string', - description: "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')", - }, - query: { - type: 'string', - description: 'The question or topic to find documentation for - be specific', - }, - version: { type: 'string', description: "Specific version (optional, e.g., '14', 'v2')" }, - }, - required: ['library_name', 'query'], - }, -} + id: "search_library_docs", + name: "search_library_docs", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"library_name":{"type":"string","description":"Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')"},"query":{"type":"string","description":"The question or topic to find documentation for - be specific"},"version":{"type":"string","description":"Specific version (optional, e.g., '14', 'v2')"}},"required":["library_name","query"]}, +}; export const SearchOnline: ToolCatalogEntry = { - id: 'search_online', - name: 'search_online', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - category: { - type: 'string', - description: 'Filter by category', - enum: [ - 'news', - 'tweet', - 'github', - 'paper', - 'company', - 'research paper', - 'linkedin profile', - 'pdf', - 'personal site', - ], - }, - include_text: { type: 'boolean', description: 'Include page text content (default true)' }, - num_results: { type: 'number', description: 'Number of results (default 10, max 25)' }, - query: { type: 'string', description: 'Natural language search query' }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "pricing changes" or "Slack webhook docs", not a full sentence like "Searching online for pricing changes".', - }, - }, - required: ['query', 'toolTitle'], - }, -} + id: "search_online", + name: "search_online", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"category":{"type":"string","description":"Filter by category","enum":["news","tweet","github","paper","company","research paper","linkedin profile","pdf","personal site"]},"include_text":{"type":"boolean","description":"Include page text content (default true)"},"num_results":{"type":"number","description":"Number of results (default 10, max 25)"},"query":{"type":"string","description":"Natural language search query"},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"pricing changes\" or \"Slack webhook docs\", not a full sentence like \"Searching online for pricing changes\"."}},"required":["query","toolTitle"]}, +}; export const SearchPatterns: ToolCatalogEntry = { - id: 'search_patterns', - name: 'search_patterns', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - limit: { - type: 'integer', - description: 'Maximum number of unique pattern examples to return (defaults to 3).', - }, - queries: { - type: 'array', - description: - 'Up to 3 descriptive strings explaining the workflow pattern(s) you need. 
Focus on intent and desired outcomes.', - items: { - type: 'string', - description: 'Example: "how to automate wealthbox meeting notes into follow-up tasks"', - }, - }, - }, - required: ['queries'], - }, -} + id: "search_patterns", + name: "search_patterns", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"limit":{"type":"integer","description":"Maximum number of unique pattern examples to return (defaults to 3)."},"queries":{"type":"array","description":"Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.","items":{"type":"string","description":"Example: \"how to automate wealthbox meeting notes into follow-up tasks\""}}},"required":["queries"]}, +}; export const SetBlockEnabled: ToolCatalogEntry = { - id: 'set_block_enabled', - name: 'set_block_enabled', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - blockId: { - type: 'string', - description: 'The block ID whose enabled state should be changed.', - }, - enabled: { - type: 'boolean', - description: 'Set to true to enable the block, or false to disable it.', - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', - }, - }, - required: ['blockId', 'enabled'], - }, - requiredPermission: 'write', -} + id: "set_block_enabled", + name: "set_block_enabled", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"blockId":{"type":"string","description":"The block ID whose enabled state should be changed."},"enabled":{"type":"boolean","description":"Set to true to enable the block, or false to disable it."},"workflowId":{"type":"string","description":"Optional workflow ID to edit. 
If not provided, uses the current workflow in context."}},"required":["blockId","enabled"]}, + requiredPermission: "write", +}; export const SetEnvironmentVariables: ToolCatalogEntry = { - id: 'set_environment_variables', - name: 'set_environment_variables', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - scope: { - type: 'string', - description: - 'Whether to set workspace or personal environment variables. Defaults to workspace.', - enum: ['personal', 'workspace'], - default: 'workspace', - }, - variables: { - type: 'array', - description: 'List of env vars to set', - items: { - type: 'object', - properties: { - name: { type: 'string', description: 'Variable name' }, - value: { type: 'string', description: 'Variable value' }, - }, - required: ['name', 'value'], - }, - }, - }, - required: ['variables'], - }, + id: "set_environment_variables", + name: "set_environment_variables", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"scope":{"type":"string","description":"Whether to set workspace or personal environment variables. 
Defaults to workspace.","enum":["personal","workspace"],"default":"workspace"},"variables":{"type":"array","description":"List of env vars to set","items":{"type":"object","properties":{"name":{"type":"string","description":"Variable name"},"value":{"type":"string","description":"Variable value"}},"required":["name","value"]}}},"required":["variables"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const SetGlobalWorkflowVariables: ToolCatalogEntry = { - id: 'set_global_workflow_variables', - name: 'set_global_workflow_variables', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - operations: { - type: 'array', - description: 'List of operations to apply', - items: { - type: 'object', - properties: { - name: { type: 'string' }, - operation: { type: 'string', enum: ['add', 'delete', 'edit'] }, - type: { type: 'string', enum: ['plain', 'number', 'boolean', 'array', 'object'] }, - value: { type: 'string' }, - }, - required: ['operation', 'name', 'type', 'value'], - }, - }, - workflowId: { - type: 'string', - description: 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - required: ['operations'], - }, + id: "set_global_workflow_variables", + name: "set_global_workflow_variables", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"operations":{"type":"array","description":"List of operations to apply","items":{"type":"object","properties":{"name":{"type":"string"},"operation":{"type":"string","enum":["add","delete","edit"]},"type":{"type":"string","enum":["plain","number","boolean","array","object"]},"value":{"type":"string"}},"required":["operation","name","type","value"]}},"workflowId":{"type":"string","description":"Optional workflow ID. 
If not provided, uses the current workflow in context."}},"required":["operations"]}, requiresConfirmation: true, - requiredPermission: 'write', -} + requiredPermission: "write", +}; export const Superagent: ToolCatalogEntry = { - id: 'superagent', - name: 'superagent', - route: 'subagent', - mode: 'async', - parameters: { - properties: { - task: { - description: - "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", - type: 'string', - }, - }, - required: ['task'], - type: 'object', - }, - subagentId: 'superagent', + id: "superagent", + name: "superagent", + route: "subagent", + mode: "async", + parameters: {"properties":{"task":{"description":"A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.","type":"string"}},"required":["task"],"type":"object"}, + subagentId: "superagent", internal: true, -} +}; export const Table: ToolCatalogEntry = { - id: 'table', - name: 'table', - route: 'subagent', - mode: 'async', - parameters: { - properties: { request: { description: 'What table action is needed.', type: 'string' } }, - required: ['request'], - type: 'object', - }, - subagentId: 'table', + id: "table", + name: "table", + route: "subagent", + mode: "async", + parameters: {"properties":{"request":{"description":"What table action is needed.","type":"string"}},"required":["request"],"type":"object"}, + subagentId: "table", internal: true, -} +}; export const ToolSearchToolRegex: ToolCatalogEntry = { - id: 'tool_search_tool_regex', - name: 'tool_search_tool_regex', - route: 'sim', - mode: 'async', - parameters: { - properties: { - case_insensitive: { - description: 'Whether the regex should be case-insensitive (default true).', - type: 'boolean', - }, - max_results: { - description: 'Maximum number of tools 
to return (optional).', - type: 'integer', - }, - pattern: { - description: 'Regular expression to match tool names or descriptions.', - type: 'string', - }, - }, - required: ['pattern'], - type: 'object', - }, -} + id: "tool_search_tool_regex", + name: "tool_search_tool_regex", + route: "sim", + mode: "async", + parameters: {"properties":{"case_insensitive":{"description":"Whether the regex should be case-insensitive (default true).","type":"boolean"},"max_results":{"description":"Maximum number of tools to return (optional).","type":"integer"},"pattern":{"description":"Regular expression to match tool names or descriptions.","type":"string"}},"required":["pattern"],"type":"object"}, +}; export const UpdateJobHistory: ToolCatalogEntry = { - id: 'update_job_history', - name: 'update_job_history', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - jobId: { type: 'string', description: 'The job ID.' }, - summary: { - type: 'string', - description: - "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol').", - }, - }, - required: ['jobId', 'summary'], - }, -} + id: "update_job_history", + name: "update_job_history", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"jobId":{"type":"string","description":"The job ID."},"summary":{"type":"string","description":"A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol')."}},"required":["jobId","summary"]}, +}; export const UpdateWorkspaceMcpServer: ToolCatalogEntry = { - id: 'update_workspace_mcp_server', - name: 'update_workspace_mcp_server', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - description: { type: 'string', description: 'New description for the server' }, - isPublic: { type: 'boolean', description: 'Whether the server is publicly accessible' }, - name: { type: 'string', description: 'New name for the server' }, - 
serverId: { type: 'string', description: 'Required: the MCP server ID to update' }, - }, - required: ['serverId'], - }, + id: "update_workspace_mcp_server", + name: "update_workspace_mcp_server", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"description":{"type":"string","description":"New description for the server"},"isPublic":{"type":"boolean","description":"Whether the server is publicly accessible"},"name":{"type":"string","description":"New name for the server"},"serverId":{"type":"string","description":"Required: the MCP server ID to update"}},"required":["serverId"]}, requiresConfirmation: true, - requiredPermission: 'admin', -} + requiredPermission: "admin", +}; export const UserMemory: ToolCatalogEntry = { - id: 'user_memory', - name: 'user_memory', - route: 'go', - mode: 'sync', - parameters: { - type: 'object', - properties: { - confidence: { - type: 'number', - description: 'Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)', - }, - correct_value: { - type: 'string', - description: "The correct value to replace the wrong one (for 'correct' operation)", - }, - key: { - type: 'string', - description: "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')", - }, - limit: { type: 'number', description: 'Number of results for search (default 10)' }, - memory_type: { - type: 'string', - description: "Type of memory: 'preference', 'entity', 'history', or 'correction'", - enum: ['preference', 'entity', 'history', 'correction'], - }, - operation: { - type: 'string', - description: "Operation: 'add', 'search', 'delete', 'correct', or 'list'", - enum: ['add', 'search', 'delete', 'correct', 'list'], - }, - query: { type: 'string', description: 'Search query to find relevant memories' }, - source: { - type: 'string', - description: "Source: 'explicit' (user told you) or 'inferred' (you observed)", - enum: ['explicit', 'inferred'], - }, - value: { type: 'string', description: 'Value to remember' }, - 
}, - required: ['operation'], - }, -} + id: "user_memory", + name: "user_memory", + route: "go", + mode: "sync", + parameters: {"type":"object","properties":{"confidence":{"type":"number","description":"Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)"},"correct_value":{"type":"string","description":"The correct value to replace the wrong one (for 'correct' operation)"},"key":{"type":"string","description":"Unique key for the memory (e.g., 'preferred_model', 'slack_credential')"},"limit":{"type":"number","description":"Number of results for search (default 10)"},"memory_type":{"type":"string","description":"Type of memory: 'preference', 'entity', 'history', or 'correction'","enum":["preference","entity","history","correction"]},"operation":{"type":"string","description":"Operation: 'add', 'search', 'delete', 'correct', or 'list'","enum":["add","search","delete","correct","list"]},"query":{"type":"string","description":"Search query to find relevant memories"},"source":{"type":"string","description":"Source: 'explicit' (user told you) or 'inferred' (you observed)","enum":["explicit","inferred"]},"value":{"type":"string","description":"Value to remember"}},"required":["operation"]}, +}; export const UserTable: ToolCatalogEntry = { - id: 'user_table', - name: 'user_table', - route: 'sim', - mode: 'async', - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: 'Arguments for the operation', - properties: { - column: { - type: 'object', - description: 'Column definition for add_column: { name, type, unique?, position? }', - }, - columnName: { - type: 'string', - description: - 'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)', - }, - columnNames: { - type: 'array', - description: - 'Array of column names to delete at once (for delete_column). 
Preferred over columnName when deleting multiple columns.', - }, - data: { - type: 'object', - description: 'Row data as key-value pairs (required for insert_row, update_row)', - }, - description: { type: 'string', description: "Table description (optional for 'create')" }, - fileId: { - type: 'string', - description: - 'Canonical workspace file ID for create_from_file/import_file. Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', - }, - filePath: { - type: 'string', - description: - 'Legacy workspace file reference for create_from_file/import_file. Prefer fileId.', - }, - filter: { - type: 'object', - description: - 'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter', - }, - limit: { - type: 'number', - description: 'Maximum rows to return or affect (optional, default 100)', - }, - name: { type: 'string', description: "Table name (required for 'create')" }, - newName: { type: 'string', description: 'New column name (required for rename_column)' }, - newType: { - type: 'string', - description: - 'New column type (optional for update_column). Types: string, number, boolean, date, json', - }, - offset: { - type: 'number', - description: 'Number of rows to skip (optional for query_rows, default 0)', - }, - outputFormat: { - type: 'string', - description: - 'Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.', - enum: ['json', 'csv', 'txt', 'md', 'html'], - }, - outputPath: { - type: 'string', - description: - 'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. 
Use a flat path like "files/export.csv" — nested paths are not supported.', - }, - rowId: { - type: 'string', - description: 'Row ID (required for get_row, update_row, delete_row)', - }, - rowIds: { - type: 'array', - description: 'Array of row IDs to delete (for batch_delete_rows)', - }, - rows: { - type: 'array', - description: 'Array of row data objects (required for batch_insert_rows)', - }, - schema: { - type: 'object', - description: - "Table schema with columns array (required for 'create'). Each column: { name, type, unique? }", - }, - sort: { - type: 'object', - description: - "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)", - }, - tableId: { - type: 'string', - description: - "Table ID (required for most operations except 'create' and batch 'delete')", - }, - tableIds: { - type: 'array', - description: 'Array of table IDs (for batch delete)', - items: { type: 'string' }, - }, - unique: { - type: 'boolean', - description: 'Set column unique constraint (optional for update_column)', - }, - updates: { - type: 'array', - description: - 'Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)', - }, - values: { - type: 'object', - description: - 'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)', - }, - }, - }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: [ - 'create', - 'create_from_file', - 'import_file', - 'get', - 'get_schema', - 'delete', - 'insert_row', - 'batch_insert_rows', - 'get_row', - 'query_rows', - 'update_row', - 'delete_row', - 'update_rows_by_filter', - 'delete_rows_by_filter', - 'batch_update_rows', - 'batch_delete_rows', - 'add_column', - 'rename_column', - 'delete_column', - 'update_column', - ], - }, - }, - required: ['operation', 'args'], - }, - resultSchema: { - type: 'object', - properties: { - data: { type: 'object', description: 'Operation-specific result payload.' 
}, - message: { type: 'string', description: 'Human-readable outcome summary.' }, - success: { type: 'boolean', description: 'Whether the operation succeeded.' }, - }, - required: ['success', 'message'], - }, + id: "user_table", + name: "user_table", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"column":{"type":"object","description":"Column definition for add_column: { name, type, unique?, position? }"},"columnName":{"type":"string","description":"Column name (required for rename_column, update_column; use columnNames array for batch delete_column)"},"columnNames":{"type":"array","description":"Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns."},"data":{"type":"object","description":"Row data as key-value pairs (required for insert_row, update_row)"},"description":{"type":"string","description":"Table description (optional for 'create')"},"fileId":{"type":"string","description":"Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")."},"filePath":{"type":"string","description":"Legacy workspace file reference for create_from_file/import_file. Prefer fileId."},"filter":{"type":"object","description":"MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter"},"limit":{"type":"number","description":"Maximum rows to return or affect (optional, default 100)"},"name":{"type":"string","description":"Table name (required for 'create')"},"newName":{"type":"string","description":"New column name (required for rename_column)"},"newType":{"type":"string","description":"New column type (optional for update_column). 
Types: string, number, boolean, date, json"},"offset":{"type":"number","description":"Number of rows to skip (optional for query_rows, default 0)"},"outputFormat":{"type":"string","description":"Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.","enum":["json","csv","txt","md","html"]},"outputPath":{"type":"string","description":"Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported."},"rowId":{"type":"string","description":"Row ID (required for get_row, update_row, delete_row)"},"rowIds":{"type":"array","description":"Array of row IDs to delete (for batch_delete_rows)"},"rows":{"type":"array","description":"Array of row data objects (required for batch_insert_rows)"},"schema":{"type":"object","description":"Table schema with columns array (required for 'create'). Each column: { name, type, unique? 
}"},"sort":{"type":"object","description":"Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)"},"tableId":{"type":"string","description":"Table ID (required for most operations except 'create' and batch 'delete')"},"tableIds":{"type":"array","description":"Array of table IDs (for batch delete)","items":{"type":"string"}},"unique":{"type":"boolean","description":"Set column unique constraint (optional for update_column)"},"updates":{"type":"array","description":"Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)"},"values":{"type":"object","description":"Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)"}}},"operation":{"type":"string","description":"The operation to perform","enum":["create","create_from_file","import_file","get","get_schema","delete","insert_row","batch_insert_rows","get_row","query_rows","update_row","delete_row","update_rows_by_filter","delete_rows_by_filter","batch_update_rows","batch_delete_rows","add_column","rename_column","delete_column","update_column"]}},"required":["operation","args"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Operation-specific result payload."},"message":{"type":"string","description":"Human-readable outcome summary."},"success":{"type":"boolean","description":"Whether the operation succeeded."}},"required":["success","message"]}, requiresConfirmation: true, -} +}; export const Workflow: ToolCatalogEntry = { - id: 'workflow', - name: 'workflow', - route: 'subagent', - mode: 'async', - parameters: { type: 'object' }, - subagentId: 'workflow', + id: "workflow", + name: "workflow", + route: "subagent", + mode: "async", + parameters: {"type":"object"}, + subagentId: "workflow", internal: true, -} +}; export const WorkspaceFile: ToolCatalogEntry = { - id: 'workspace_file', - name: 'workspace_file', - route: 'sim', - mode: 'async', - parameters: 
{ - type: 'object', - properties: { - operation: { - type: 'string', - description: 'The file operation to perform.', - enum: ['append', 'update', 'patch'], - }, - target: { - type: 'object', - description: 'Explicit file target. Use kind=file_id + fileId for existing files.', - properties: { - fileId: { - type: 'string', - description: 'Canonical existing workspace file ID. Required when target.kind=file_id.', - }, - fileName: { - type: 'string', - description: - 'Plain workspace filename including extension, e.g. "main.py" or "report.docx". Required when target.kind=new_file.', - }, - kind: { - type: 'string', - description: 'How the file target is identified.', - enum: ['new_file', 'file_id'], - }, - }, - required: ['kind'], - }, - title: { - type: 'string', - description: - 'Required short UI label for this content unit, e.g. "Chapter 1", "Slide 3", or "Fix footer spacing".', - }, - contentType: { - type: 'string', - description: - 'Optional MIME type override. Usually omit and let the system infer from the target file extension.', - enum: [ - 'text/markdown', - 'text/html', - 'text/plain', - 'application/json', - 'text/csv', - 'application/vnd.openxmlformats-officedocument.presentationml.presentation', - 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', - 'application/pdf', - ], - }, - edit: { - type: 'object', - description: - 'Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.', - properties: { - after_anchor: { - type: 'string', - description: - 'Boundary line kept after inserted replacement content. Required for mode=replace_between.', - }, - anchor: { - type: 'string', - description: - 'Anchor line after which new content is inserted. 
Required for mode=insert_after.', - }, - before_anchor: { - type: 'string', - description: - 'Boundary line kept before inserted replacement content. Required for mode=replace_between.', - }, - end_anchor: { - type: 'string', - description: 'First line to keep after deletion. Required for mode=delete_between.', - }, - mode: { - type: 'string', - description: 'Anchored edit mode when strategy=anchored.', - enum: ['replace_between', 'insert_after', 'delete_between'], - }, - occurrence: { - type: 'number', - description: '1-based occurrence for repeated anchor lines. Optional; defaults to 1.', - }, - replaceAll: { - type: 'boolean', - description: - 'When true and strategy=search_replace, replace every match instead of requiring a unique single match.', - }, - search: { - type: 'string', - description: - 'Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true.', - }, - start_anchor: { - type: 'string', - description: 'First line to delete. Required for mode=delete_between.', - }, - strategy: { - type: 'string', - description: 'Patch strategy.', - enum: ['search_replace', 'anchored'], - }, - }, - }, - newName: { - type: 'string', - description: 'New file name for rename. Must be a plain workspace filename like "main.py".', - }, - }, - required: ['operation', 'target', 'title'], - }, - resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: - 'Optional operation metadata such as file id, file name, size, and content type.', - }, - message: { type: 'string', description: 'Human-readable summary of the outcome.' }, - success: { type: 'boolean', description: 'Whether the file operation succeeded.' 
}, - }, - required: ['success', 'message'], - }, - requiredPermission: 'write', -} + id: "workspace_file", + name: "workspace_file", + route: "sim", + mode: "async", + parameters: {"type":"object","properties":{"operation":{"type":"string","description":"The file operation to perform.","enum":["append","update","patch"]},"target":{"type":"object","description":"Explicit file target. Use kind=file_id + fileId for existing files.","properties":{"fileId":{"type":"string","description":"Canonical existing workspace file ID. Required when target.kind=file_id."},"fileName":{"type":"string","description":"Plain workspace filename including extension, e.g. \"main.py\" or \"report.docx\". Required when target.kind=new_file."},"kind":{"type":"string","description":"How the file target is identified.","enum":["new_file","file_id"]}},"required":["kind"]},"title":{"type":"string","description":"Required short UI label for this content unit, e.g. \"Chapter 1\", \"Slide 3\", or \"Fix footer spacing\"."},"contentType":{"type":"string","description":"Optional MIME type override. Usually omit and let the system infer from the target file extension.","enum":["text/markdown","text/html","text/plain","application/json","text/csv","application/vnd.openxmlformats-officedocument.presentationml.presentation","application/vnd.openxmlformats-officedocument.wordprocessingml.document","application/pdf"]},"edit":{"type":"object","description":"Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.","properties":{"after_anchor":{"type":"string","description":"Boundary line kept after inserted replacement content. Required for mode=replace_between."},"anchor":{"type":"string","description":"Anchor line after which new content is inserted. 
Required for mode=insert_after."},"before_anchor":{"type":"string","description":"Boundary line kept before inserted replacement content. Required for mode=replace_between."},"end_anchor":{"type":"string","description":"First line to keep after deletion. Required for mode=delete_between."},"mode":{"type":"string","description":"Anchored edit mode when strategy=anchored.","enum":["replace_between","insert_after","delete_between"]},"occurrence":{"type":"number","description":"1-based occurrence for repeated anchor lines. Optional; defaults to 1."},"replaceAll":{"type":"boolean","description":"When true and strategy=search_replace, replace every match instead of requiring a unique single match."},"search":{"type":"string","description":"Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true."},"start_anchor":{"type":"string","description":"First line to delete. Required for mode=delete_between."},"strategy":{"type":"string","description":"Patch strategy.","enum":["search_replace","anchored"]}}},"newName":{"type":"string","description":"New file name for rename. 
Must be a plain workspace filename like \"main.py\"."}},"required":["operation","target","title"]}, + resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Optional operation metadata such as file id, file name, size, and content type."},"message":{"type":"string","description":"Human-readable summary of the outcome."},"success":{"type":"boolean","description":"Whether the file operation succeeded."}},"required":["success","message"]}, + requiredPermission: "write", +}; export const KnowledgeBaseOperation = { - create: 'create', - get: 'get', - query: 'query', - addFile: 'add_file', - update: 'update', - delete: 'delete', - deleteDocument: 'delete_document', - updateDocument: 'update_document', - listTags: 'list_tags', - createTag: 'create_tag', - updateTag: 'update_tag', - deleteTag: 'delete_tag', - getTagUsage: 'get_tag_usage', - addConnector: 'add_connector', - updateConnector: 'update_connector', - deleteConnector: 'delete_connector', - syncConnector: 'sync_connector', -} as const - -export type KnowledgeBaseOperation = - (typeof KnowledgeBaseOperation)[keyof typeof KnowledgeBaseOperation] + create: "create", + get: "get", + query: "query", + addFile: "add_file", + update: "update", + delete: "delete", + deleteDocument: "delete_document", + updateDocument: "update_document", + listTags: "list_tags", + createTag: "create_tag", + updateTag: "update_tag", + deleteTag: "delete_tag", + getTagUsage: "get_tag_usage", + addConnector: "add_connector", + updateConnector: "update_connector", + deleteConnector: "delete_connector", + syncConnector: "sync_connector", +} as const; + +export type KnowledgeBaseOperation = (typeof KnowledgeBaseOperation)[keyof typeof KnowledgeBaseOperation]; export const KnowledgeBaseOperationValues = [ KnowledgeBaseOperation.create, @@ -3123,47 +854,45 @@ export const KnowledgeBaseOperationValues = [ KnowledgeBaseOperation.updateConnector, KnowledgeBaseOperation.deleteConnector, 
KnowledgeBaseOperation.syncConnector, -] as const +] as const; export const ManageCredentialOperation = { - rename: 'rename', - delete: 'delete', -} as const + rename: "rename", + delete: "delete", +} as const; -export type ManageCredentialOperation = - (typeof ManageCredentialOperation)[keyof typeof ManageCredentialOperation] +export type ManageCredentialOperation = (typeof ManageCredentialOperation)[keyof typeof ManageCredentialOperation]; export const ManageCredentialOperationValues = [ ManageCredentialOperation.rename, ManageCredentialOperation.delete, -] as const +] as const; export const ManageCustomToolOperation = { - add: 'add', - edit: 'edit', - delete: 'delete', - list: 'list', -} as const + add: "add", + edit: "edit", + delete: "delete", + list: "list", +} as const; -export type ManageCustomToolOperation = - (typeof ManageCustomToolOperation)[keyof typeof ManageCustomToolOperation] +export type ManageCustomToolOperation = (typeof ManageCustomToolOperation)[keyof typeof ManageCustomToolOperation]; export const ManageCustomToolOperationValues = [ ManageCustomToolOperation.add, ManageCustomToolOperation.edit, ManageCustomToolOperation.delete, ManageCustomToolOperation.list, -] as const +] as const; export const ManageJobOperation = { - create: 'create', - list: 'list', - get: 'get', - update: 'update', - delete: 'delete', -} as const + create: "create", + list: "list", + get: "get", + update: "update", + delete: "delete", +} as const; -export type ManageJobOperation = (typeof ManageJobOperation)[keyof typeof ManageJobOperation] +export type ManageJobOperation = (typeof ManageJobOperation)[keyof typeof ManageJobOperation]; export const ManageJobOperationValues = [ ManageJobOperation.create, @@ -3171,67 +900,65 @@ export const ManageJobOperationValues = [ ManageJobOperation.get, ManageJobOperation.update, ManageJobOperation.delete, -] as const +] as const; export const ManageMcpToolOperation = { - add: 'add', - edit: 'edit', - delete: 'delete', - list: 
'list', -} as const + add: "add", + edit: "edit", + delete: "delete", + list: "list", +} as const; -export type ManageMcpToolOperation = - (typeof ManageMcpToolOperation)[keyof typeof ManageMcpToolOperation] +export type ManageMcpToolOperation = (typeof ManageMcpToolOperation)[keyof typeof ManageMcpToolOperation]; export const ManageMcpToolOperationValues = [ ManageMcpToolOperation.add, ManageMcpToolOperation.edit, ManageMcpToolOperation.delete, ManageMcpToolOperation.list, -] as const +] as const; export const ManageSkillOperation = { - add: 'add', - edit: 'edit', - delete: 'delete', - list: 'list', -} as const + add: "add", + edit: "edit", + delete: "delete", + list: "list", +} as const; -export type ManageSkillOperation = (typeof ManageSkillOperation)[keyof typeof ManageSkillOperation] +export type ManageSkillOperation = (typeof ManageSkillOperation)[keyof typeof ManageSkillOperation]; export const ManageSkillOperationValues = [ ManageSkillOperation.add, ManageSkillOperation.edit, ManageSkillOperation.delete, ManageSkillOperation.list, -] as const +] as const; export const MaterializeFileOperation = { - save: 'save', - import: 'import', - table: 'table', - knowledgeBase: 'knowledge_base', -} as const + save: "save", + import: "import", + table: "table", + knowledgeBase: "knowledge_base", +} as const; -export type MaterializeFileOperation = - (typeof MaterializeFileOperation)[keyof typeof MaterializeFileOperation] +export type MaterializeFileOperation = (typeof MaterializeFileOperation)[keyof typeof MaterializeFileOperation]; export const MaterializeFileOperationValues = [ MaterializeFileOperation.save, MaterializeFileOperation.import, MaterializeFileOperation.table, MaterializeFileOperation.knowledgeBase, -] as const +] as const; export const UserMemoryOperation = { - add: 'add', - search: 'search', - delete: 'delete', - correct: 'correct', - list: 'list', -} as const + add: "add", + search: "search", + delete: "delete", + correct: "correct", + list: "list", +} 
as const; -export type UserMemoryOperation = (typeof UserMemoryOperation)[keyof typeof UserMemoryOperation] +export type UserMemoryOperation = (typeof UserMemoryOperation)[keyof typeof UserMemoryOperation]; export const UserMemoryOperationValues = [ UserMemoryOperation.add, @@ -3239,32 +966,32 @@ export const UserMemoryOperationValues = [ UserMemoryOperation.delete, UserMemoryOperation.correct, UserMemoryOperation.list, -] as const +] as const; export const UserTableOperation = { - create: 'create', - createFromFile: 'create_from_file', - importFile: 'import_file', - get: 'get', - getSchema: 'get_schema', - delete: 'delete', - insertRow: 'insert_row', - batchInsertRows: 'batch_insert_rows', - getRow: 'get_row', - queryRows: 'query_rows', - updateRow: 'update_row', - deleteRow: 'delete_row', - updateRowsByFilter: 'update_rows_by_filter', - deleteRowsByFilter: 'delete_rows_by_filter', - batchUpdateRows: 'batch_update_rows', - batchDeleteRows: 'batch_delete_rows', - addColumn: 'add_column', - renameColumn: 'rename_column', - deleteColumn: 'delete_column', - updateColumn: 'update_column', -} as const - -export type UserTableOperation = (typeof UserTableOperation)[keyof typeof UserTableOperation] + create: "create", + createFromFile: "create_from_file", + importFile: "import_file", + get: "get", + getSchema: "get_schema", + delete: "delete", + insertRow: "insert_row", + batchInsertRows: "batch_insert_rows", + getRow: "get_row", + queryRows: "query_rows", + updateRow: "update_row", + deleteRow: "delete_row", + updateRowsByFilter: "update_rows_by_filter", + deleteRowsByFilter: "delete_rows_by_filter", + batchUpdateRows: "batch_update_rows", + batchDeleteRows: "batch_delete_rows", + addColumn: "add_column", + renameColumn: "rename_column", + deleteColumn: "delete_column", + updateColumn: "update_column", +} as const; + +export type UserTableOperation = (typeof UserTableOperation)[keyof typeof UserTableOperation]; export const UserTableOperationValues = [ 
UserTableOperation.create, @@ -3287,22 +1014,21 @@ export const UserTableOperationValues = [ UserTableOperation.renameColumn, UserTableOperation.deleteColumn, UserTableOperation.updateColumn, -] as const +] as const; export const WorkspaceFileOperation = { - append: 'append', - update: 'update', - patch: 'patch', -} as const + append: "append", + update: "update", + patch: "patch", +} as const; -export type WorkspaceFileOperation = - (typeof WorkspaceFileOperation)[keyof typeof WorkspaceFileOperation] +export type WorkspaceFileOperation = (typeof WorkspaceFileOperation)[keyof typeof WorkspaceFileOperation]; export const WorkspaceFileOperationValues = [ WorkspaceFileOperation.append, WorkspaceFileOperation.update, WorkspaceFileOperation.patch, -] as const +] as const; export const TOOL_CATALOG: Record = { [Agent.id]: Agent, @@ -3392,4 +1118,4 @@ export const TOOL_CATALOG: Record = { [UserTable.id]: UserTable, [Workflow.id]: Workflow, [WorkspaceFile.id]: WorkspaceFile, -} +}; diff --git a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts index 78e624c8473..9d938ce53d7 100644 --- a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts @@ -5,2899 +5,3125 @@ export type JsonSchema = unknown export interface ToolRuntimeSchemaEntry { - parameters?: JsonSchema - resultSchema?: JsonSchema + parameters?: JsonSchema; + resultSchema?: JsonSchema; } export const TOOL_RUNTIME_SCHEMAS: Record = { - agent: { + ["agent"]: { parameters: { - properties: { - request: { - description: 'What tool/skill/MCP action is needed.', - type: 'string', - }, + "properties": { + "request": { + "description": "What tool/skill/MCP action is needed.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - auth: { + ["auth"]: { parameters: { - properties: { - request: { - description: 'What 
authentication/credential action is needed.', - type: 'string', - }, + "properties": { + "request": { + "description": "What authentication/credential action is needed.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - check_deployment_status: { + ["check_deployment_status"]: { parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: 'Workflow ID to check (defaults to current workflow)', - }, - }, + "type": "object", + "properties": { + "workflowId": { + "type": "string", + "description": "Workflow ID to check (defaults to current workflow)" + } + } }, resultSchema: undefined, }, - complete_job: { + ["complete_job"]: { parameters: { - type: 'object', - properties: { - jobId: { - type: 'string', - description: 'The ID of the job to mark as completed.', - }, + "type": "object", + "properties": { + "jobId": { + "type": "string", + "description": "The ID of the job to mark as completed." + } }, - required: ['jobId'], + "required": [ + "jobId" + ] }, resultSchema: undefined, }, - context_write: { + ["context_write"]: { parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: 'Full content to write to the file (replaces existing content)', - }, - file_path: { - type: 'string', - description: "Path of the file to write (e.g. 'SESSION.md')", + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "Full content to write to the file (replaces existing content)" }, + "file_path": { + "type": "string", + "description": "Path of the file to write (e.g. 
'SESSION.md')" + } }, - required: ['file_path', 'content'], + "required": [ + "file_path", + "content" + ] }, resultSchema: undefined, }, - crawl_website: { + ["crawl_website"]: { parameters: { - type: 'object', - properties: { - exclude_paths: { - type: 'array', - description: 'Skip URLs matching these patterns', - items: { - type: 'string', - }, - }, - include_paths: { - type: 'array', - description: 'Only crawl URLs matching these patterns', - items: { - type: 'string', - }, + "type": "object", + "properties": { + "exclude_paths": { + "type": "array", + "description": "Skip URLs matching these patterns", + "items": { + "type": "string" + } }, - limit: { - type: 'number', - description: 'Maximum pages to crawl (default 10, max 50)', + "include_paths": { + "type": "array", + "description": "Only crawl URLs matching these patterns", + "items": { + "type": "string" + } }, - max_depth: { - type: 'number', - description: 'How deep to follow links (default 2)', + "limit": { + "type": "number", + "description": "Maximum pages to crawl (default 10, max 50)" }, - url: { - type: 'string', - description: 'Starting URL to crawl from', + "max_depth": { + "type": "number", + "description": "How deep to follow links (default 2)" }, + "url": { + "type": "string", + "description": "Starting URL to crawl from" + } }, - required: ['url'], + "required": [ + "url" + ] }, resultSchema: undefined, }, - create_file: { + ["create_file"]: { parameters: { - type: 'object', - properties: { - contentType: { - type: 'string', - description: - 'Optional MIME type override. Usually omit and let the system infer from the file extension.', - }, - fileName: { - type: 'string', - description: - 'Plain workspace filename including extension, e.g. "main.py" or "report.md". Must not contain slashes.', + "type": "object", + "properties": { + "contentType": { + "type": "string", + "description": "Optional MIME type override. Usually omit and let the system infer from the file extension." 
}, + "fileName": { + "type": "string", + "description": "Plain workspace filename including extension, e.g. \"main.py\" or \"report.md\". Must not contain slashes." + } }, - required: ['fileName'], + "required": [ + "fileName" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: 'Contains id (the fileId) and name.', - }, - message: { - type: 'string', - description: 'Human-readable outcome.', + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Contains id (the fileId) and name." }, - success: { - type: 'boolean', - description: 'Whether the file was created.', + "message": { + "type": "string", + "description": "Human-readable outcome." }, + "success": { + "type": "boolean", + "description": "Whether the file was created." + } }, - required: ['success', 'message'], + "required": [ + "success", + "message" + ] }, }, - create_folder: { + ["create_folder"]: { parameters: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'Folder name.', - }, - parentId: { - type: 'string', - description: 'Optional parent folder ID.', + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Folder name." }, - workspaceId: { - type: 'string', - description: 'Optional workspace ID.', + "parentId": { + "type": "string", + "description": "Optional parent folder ID." }, + "workspaceId": { + "type": "string", + "description": "Optional workspace ID." + } }, - required: ['name'], + "required": [ + "name" + ] }, resultSchema: undefined, }, - create_job: { + ["create_job"]: { parameters: { - type: 'object', - properties: { - cron: { - type: 'string', - description: - "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). 
Omit for one-time jobs.", + "type": "object", + "properties": { + "cron": { + "type": "string", + "description": "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs." }, - lifecycle: { - type: 'string', - description: - "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", - enum: ['persistent', 'until_complete'], + "lifecycle": { + "type": "string", + "description": "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", + "enum": [ + "persistent", + "until_complete" + ] }, - maxRuns: { - type: 'integer', - description: - 'Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling.', + "maxRuns": { + "type": "integer", + "description": "Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling." }, - prompt: { - type: 'string', - description: - 'The prompt to execute when the job fires. This is sent to the Mothership as a user message.', + "prompt": { + "type": "string", + "description": "The prompt to execute when the job fires. This is sent to the Mothership as a user message." }, - successCondition: { - type: 'string', - description: - "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email').", + "successCondition": { + "type": "string", + "description": "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email')." }, - time: { - type: 'string', - description: - "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). 
Include timezone offset or use the timezone parameter.", + "time": { + "type": "string", + "description": "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter." }, - timezone: { - type: 'string', - description: - "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC.", - }, - title: { - type: 'string', - description: - "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name.", + "timezone": { + "type": "string", + "description": "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC." }, + "title": { + "type": "string", + "description": "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name." + } }, - required: ['title', 'prompt'], + "required": [ + "title", + "prompt" + ] }, resultSchema: undefined, }, - create_workflow: { + ["create_workflow"]: { parameters: { - type: 'object', - properties: { - description: { - type: 'string', - description: 'Optional workflow description.', - }, - folderId: { - type: 'string', - description: 'Optional folder ID.', + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Optional workflow description." }, - name: { - type: 'string', - description: 'Workflow name.', + "folderId": { + "type": "string", + "description": "Optional folder ID." }, - workspaceId: { - type: 'string', - description: 'Optional workspace ID.', + "name": { + "type": "string", + "description": "Workflow name." }, + "workspaceId": { + "type": "string", + "description": "Optional workspace ID." 
+ } }, - required: ['name'], + "required": [ + "name" + ] }, resultSchema: undefined, }, - create_workspace_mcp_server: { + ["create_workspace_mcp_server"]: { parameters: { - type: 'object', - properties: { - description: { - type: 'string', - description: 'Optional description for the server', + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Optional description for the server" }, - name: { - type: 'string', - description: 'Required: server name', - }, - workspaceId: { - type: 'string', - description: 'Workspace ID (defaults to current workspace)', + "name": { + "type": "string", + "description": "Required: server name" }, + "workspaceId": { + "type": "string", + "description": "Workspace ID (defaults to current workspace)" + } }, - required: ['name'], + "required": [ + "name" + ] }, resultSchema: undefined, }, - debug: { + ["debug"]: { parameters: { - properties: { - context: { - description: - 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', - type: 'string', - }, - request: { - description: - 'What to debug. Include error messages, block IDs, and any context about the failure.', - type: 'string', + "properties": { + "context": { + "description": "Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.", + "type": "string" }, + "request": { + "description": "What to debug. 
Include error messages, block IDs, and any context about the failure.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - delete_file: { + ["delete_file"]: { parameters: { - type: 'object', - properties: { - fileIds: { - type: 'array', - description: 'Canonical workspace file IDs of the files to delete.', - items: { - type: 'string', - }, - }, + "type": "object", + "properties": { + "fileIds": { + "type": "array", + "description": "Canonical workspace file IDs of the files to delete.", + "items": { + "type": "string" + } + } }, - required: ['fileIds'], + "required": [ + "fileIds" + ] }, resultSchema: { - type: 'object', - properties: { - message: { - type: 'string', - description: 'Human-readable outcome.', - }, - success: { - type: 'boolean', - description: 'Whether the delete succeeded.', + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "Human-readable outcome." }, + "success": { + "type": "boolean", + "description": "Whether the delete succeeded." 
+ } }, - required: ['success', 'message'], + "required": [ + "success", + "message" + ] }, }, - delete_folder: { + ["delete_folder"]: { parameters: { - type: 'object', - properties: { - folderIds: { - type: 'array', - description: 'The folder IDs to delete.', - items: { - type: 'string', - }, - }, + "type": "object", + "properties": { + "folderIds": { + "type": "array", + "description": "The folder IDs to delete.", + "items": { + "type": "string" + } + } }, - required: ['folderIds'], + "required": [ + "folderIds" + ] }, resultSchema: undefined, }, - delete_workflow: { + ["delete_workflow"]: { parameters: { - type: 'object', - properties: { - workflowIds: { - type: 'array', - description: 'The workflow IDs to delete.', - items: { - type: 'string', - }, - }, + "type": "object", + "properties": { + "workflowIds": { + "type": "array", + "description": "The workflow IDs to delete.", + "items": { + "type": "string" + } + } }, - required: ['workflowIds'], + "required": [ + "workflowIds" + ] }, resultSchema: undefined, }, - delete_workspace_mcp_server: { + ["delete_workspace_mcp_server"]: { parameters: { - type: 'object', - properties: { - serverId: { - type: 'string', - description: 'Required: the MCP server ID to delete', - }, + "type": "object", + "properties": { + "serverId": { + "type": "string", + "description": "Required: the MCP server ID to delete" + } }, - required: ['serverId'], + "required": [ + "serverId" + ] }, resultSchema: undefined, }, - deploy: { + ["deploy"]: { parameters: { - properties: { - request: { - description: - 'Detailed deployment instructions. Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).', - type: 'string', - }, + "properties": { + "request": { + "description": "Detailed deployment instructions. 
Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - deploy_api: { + ["deploy_api"]: { parameters: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Whether to deploy or undeploy the API endpoint', - enum: ['deploy', 'undeploy'], - default: 'deploy', - }, - workflowId: { - type: 'string', - description: 'Workflow ID to deploy (required in workspace context)', + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "Whether to deploy or undeploy the API endpoint", + "enum": [ + "deploy", + "undeploy" + ], + "default": "deploy" }, - }, + "workflowId": { + "type": "string", + "description": "Workflow ID to deploy (required in workspace context)" + } + } }, resultSchema: { - type: 'object', - properties: { - apiEndpoint: { - type: 'string', - description: 'Canonical workflow execution endpoint.', - }, - baseUrl: { - type: 'string', - description: 'Base URL used to construct deployment URLs.', - }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp when the workflow is deployed.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', - }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, such as api.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the workflow API is currently deployed after this tool call.', - }, - version: { - type: 'number', - description: 'Deployment version for the current API deployment.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID that was deployed or undeployed.', - }, - }, - required: [ - 'workflowId', - 'isDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, - }, - deploy_chat: { - parameters: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Whether to deploy or undeploy the chat interface', - enum: ['deploy', 'undeploy'], - default: 'deploy', - }, - allowedEmails: { - type: 'array', - description: 'List of allowed emails/domains for email or SSO auth', - items: { - type: 'string', - }, - }, - authType: { - type: 'string', - description: 'Authentication type: public, password, email, or sso', - enum: ['public', 'password', 'email', 'sso'], - default: 'public', - }, - description: { - type: 'string', - description: 'Optional description for the chat', - }, - identifier: { - type: 'string', - description: 'URL slug for the chat (lowercase letters, numbers, hyphens only)', - }, - outputConfigs: { - type: 'array', - description: 'Output configurations specifying which block outputs to display in chat', - items: { - type: 'object', - properties: { - blockId: { - type: 'string', - description: 'The block UUID', - }, - path: { - type: 'string', - description: "The output path (e.g. 'response', 'response.content')", + "type": "object", + "properties": { + "apiEndpoint": { + "type": "string", + "description": "Canonical workflow execution endpoint." + }, + "baseUrl": { + "type": "string", + "description": "Base URL used to construct deployment URLs." 
+ }, + "deployedAt": { + "type": "string", + "description": "Deployment timestamp when the workflow is deployed." + }, + "deploymentConfig": { + "type": "object", + "description": "Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details." + }, + "deploymentStatus": { + "type": "object", + "description": "Structured per-surface deployment status keyed by surface name, such as api." + }, + "deploymentType": { + "type": "string", + "description": "Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"." + }, + "examples": { + "type": "object", + "description": "Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling." + }, + "isDeployed": { + "type": "boolean", + "description": "Whether the workflow API is currently deployed after this tool call." + }, + "version": { + "type": "number", + "description": "Deployment version for the current API deployment." + }, + "workflowId": { + "type": "string", + "description": "Workflow ID that was deployed or undeployed." 
+ } + }, + "required": [ + "workflowId", + "isDeployed", + "deploymentType", + "deploymentStatus", + "deploymentConfig", + "examples" + ] + }, + }, + ["deploy_chat"]: { + parameters: { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "Whether to deploy or undeploy the chat interface", + "enum": [ + "deploy", + "undeploy" + ], + "default": "deploy" + }, + "allowedEmails": { + "type": "array", + "description": "List of allowed emails/domains for email or SSO auth", + "items": { + "type": "string" + } + }, + "authType": { + "type": "string", + "description": "Authentication type: public, password, email, or sso", + "enum": [ + "public", + "password", + "email", + "sso" + ], + "default": "public" + }, + "description": { + "type": "string", + "description": "Optional description for the chat" + }, + "identifier": { + "type": "string", + "description": "URL slug for the chat (lowercase letters, numbers, hyphens only)" + }, + "outputConfigs": { + "type": "array", + "description": "Output configurations specifying which block outputs to display in chat", + "items": { + "type": "object", + "properties": { + "blockId": { + "type": "string", + "description": "The block UUID" }, - }, - required: ['blockId', 'path'], - }, - }, - password: { - type: 'string', - description: 'Password for password-protected chats', - }, - title: { - type: 'string', - description: 'Display title for the chat interface', - }, - welcomeMessage: { - type: 'string', - description: 'Welcome message shown to users', - }, - workflowId: { - type: 'string', - description: 'Workflow ID to deploy (required in workspace context)', - }, - }, + "path": { + "type": "string", + "description": "The output path (e.g. 
'response', 'response.content')" + } + }, + "required": [ + "blockId", + "path" + ] + } + }, + "password": { + "type": "string", + "description": "Password for password-protected chats" + }, + "title": { + "type": "string", + "description": "Display title for the chat interface" + }, + "welcomeMessage": { + "type": "string", + "description": "Welcome message shown to users" + }, + "workflowId": { + "type": "string", + "description": "Workflow ID to deploy (required in workspace context)" + } + } }, resultSchema: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Action performed by the tool, such as "deploy" or "undeploy".', - }, - apiEndpoint: { - type: 'string', - description: 'Paired workflow execution endpoint used by the chat deployment.', - }, - baseUrl: { - type: 'string', - description: 'Base URL used to construct deployment URLs.', - }, - chatUrl: { - type: 'string', - description: 'Shareable chat URL when the chat surface is deployed.', - }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp for the underlying workflow deployment.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration.', - }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, including api and chat.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_chat this is always "chat".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. 
Includes chat access details and API curl examples.', - }, - identifier: { - type: 'string', - description: 'Chat identifier or slug.', - }, - isChatDeployed: { - type: 'boolean', - description: 'Whether the chat surface is deployed after this tool call.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the paired API surface remains deployed after this tool call.', - }, - success: { - type: 'boolean', - description: 'Whether the deploy_chat action completed successfully.', - }, - version: { - type: 'number', - description: 'Deployment version for the underlying workflow deployment.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID associated with the chat deployment.', - }, - }, - required: [ - 'workflowId', - 'success', - 'action', - 'isDeployed', - 'isChatDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, - }, - deploy_mcp: { - parameters: { - type: 'object', - properties: { - parameterDescriptions: { - type: 'array', - description: 'Array of parameter descriptions for the tool', - items: { - type: 'object', - properties: { - description: { - type: 'string', - description: 'Parameter description', - }, - name: { - type: 'string', - description: 'Parameter name', + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "Action performed by the tool, such as \"deploy\" or \"undeploy\"." + }, + "apiEndpoint": { + "type": "string", + "description": "Paired workflow execution endpoint used by the chat deployment." + }, + "baseUrl": { + "type": "string", + "description": "Base URL used to construct deployment URLs." + }, + "chatUrl": { + "type": "string", + "description": "Shareable chat URL when the chat surface is deployed." + }, + "deployedAt": { + "type": "string", + "description": "Deployment timestamp for the underlying workflow deployment." 
+ }, + "deploymentConfig": { + "type": "object", + "description": "Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration." + }, + "deploymentStatus": { + "type": "object", + "description": "Structured per-surface deployment status keyed by surface name, including api and chat." + }, + "deploymentType": { + "type": "string", + "description": "Deployment surface this result describes. For deploy_chat this is always \"chat\"." + }, + "examples": { + "type": "object", + "description": "Invocation examples keyed by surface name. Includes chat access details and API curl examples." + }, + "identifier": { + "type": "string", + "description": "Chat identifier or slug." + }, + "isChatDeployed": { + "type": "boolean", + "description": "Whether the chat surface is deployed after this tool call." + }, + "isDeployed": { + "type": "boolean", + "description": "Whether the paired API surface remains deployed after this tool call." + }, + "success": { + "type": "boolean", + "description": "Whether the deploy_chat action completed successfully." + }, + "version": { + "type": "number", + "description": "Deployment version for the underlying workflow deployment." + }, + "workflowId": { + "type": "string", + "description": "Workflow ID associated with the chat deployment." 
+ } + }, + "required": [ + "workflowId", + "success", + "action", + "isDeployed", + "isChatDeployed", + "deploymentType", + "deploymentStatus", + "deploymentConfig", + "examples" + ] + }, + }, + ["deploy_mcp"]: { + parameters: { + "type": "object", + "properties": { + "parameterDescriptions": { + "type": "array", + "description": "Array of parameter descriptions for the tool", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Parameter description" }, - }, - required: ['name', 'description'], - }, - }, - serverId: { - type: 'string', - description: 'Required: server ID from list_workspace_mcp_servers', - }, - toolDescription: { - type: 'string', - description: 'Description for the MCP tool', - }, - toolName: { - type: 'string', - description: 'Name for the MCP tool (defaults to workflow name)', - }, - workflowId: { - type: 'string', - description: 'Workflow ID (defaults to active workflow)', - }, - }, - required: ['serverId'], + "name": { + "type": "string", + "description": "Parameter name" + } + }, + "required": [ + "name", + "description" + ] + } + }, + "serverId": { + "type": "string", + "description": "Required: server ID from list_workspace_mcp_servers" + }, + "toolDescription": { + "type": "string", + "description": "Description for the MCP tool" + }, + "toolName": { + "type": "string", + "description": "Name for the MCP tool (defaults to workflow name)" + }, + "workflowId": { + "type": "string", + "description": "Workflow ID (defaults to active workflow)" + } + }, + "required": [ + "serverId" + ] }, resultSchema: { - type: 'object', - properties: { - action: { - type: 'string', - description: 'Action performed by the tool, such as "deploy" or "undeploy".', + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "Action performed by the tool, such as \"deploy\" or \"undeploy\"." 
}, - apiEndpoint: { - type: 'string', - description: 'Underlying workflow API endpoint associated with the MCP tool.', + "apiEndpoint": { + "type": "string", + "description": "Underlying workflow API endpoint associated with the MCP tool." }, - baseUrl: { - type: 'string', - description: 'Base URL used to construct deployment URLs.', + "baseUrl": { + "type": "string", + "description": "Base URL used to construct deployment URLs." }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details.', + "deploymentConfig": { + "type": "object", + "description": "Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details." }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable.', + "deploymentStatus": { + "type": "object", + "description": "Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable." }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_mcp this is always "mcp".', + "deploymentType": { + "type": "string", + "description": "Deployment surface this result describes. For deploy_mcp this is always \"mcp\"." }, - examples: { - type: 'object', - description: - 'Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients.', + "examples": { + "type": "object", + "description": "Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients." }, - mcpServerUrl: { - type: 'string', - description: 'HTTP MCP server URL to configure in clients.', + "mcpServerUrl": { + "type": "string", + "description": "HTTP MCP server URL to configure in clients." 
}, - removed: { - type: 'boolean', - description: 'Whether the MCP deployment was removed during an undeploy action.', + "removed": { + "type": "boolean", + "description": "Whether the MCP deployment was removed during an undeploy action." }, - serverId: { - type: 'string', - description: 'Workspace MCP server ID.', + "serverId": { + "type": "string", + "description": "Workspace MCP server ID." }, - serverName: { - type: 'string', - description: 'Workspace MCP server name.', + "serverName": { + "type": "string", + "description": "Workspace MCP server name." }, - toolDescription: { - type: 'string', - description: 'MCP tool description exposed on the server.', + "toolDescription": { + "type": "string", + "description": "MCP tool description exposed on the server." }, - toolId: { - type: 'string', - description: 'MCP tool ID when deployed.', + "toolId": { + "type": "string", + "description": "MCP tool ID when deployed." }, - toolName: { - type: 'string', - description: 'MCP tool name exposed on the server.', + "toolName": { + "type": "string", + "description": "MCP tool name exposed on the server." }, - updated: { - type: 'boolean', - description: 'Whether an existing MCP tool deployment was updated instead of created.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID associated with the MCP deployment.', + "updated": { + "type": "boolean", + "description": "Whether an existing MCP tool deployment was updated instead of created." }, + "workflowId": { + "type": "string", + "description": "Workflow ID associated with the MCP deployment." + } }, - required: ['deploymentType', 'deploymentStatus'], + "required": [ + "deploymentType", + "deploymentStatus" + ] }, }, - download_to_workspace_file: { + ["download_to_workspace_file"]: { parameters: { - type: 'object', - properties: { - fileName: { - type: 'string', - description: - 'Optional workspace file name to save as. 
If omitted, the name is inferred from the response or URL.', - }, - url: { - type: 'string', - description: - 'Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg', + "type": "object", + "properties": { + "fileName": { + "type": "string", + "description": "Optional workspace file name to save as. If omitted, the name is inferred from the response or URL." }, + "url": { + "type": "string", + "description": "Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg" + } }, - required: ['url'], + "required": [ + "url" + ] }, resultSchema: undefined, }, - edit_content: { + ["edit_content"]: { parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: - 'The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text.', - }, + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text." 
+ } }, - required: ['content'], + "required": [ + "content" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: - 'Optional operation metadata such as file id, file name, size, and content type.', - }, - message: { - type: 'string', - description: 'Human-readable summary of the outcome.', - }, - success: { - type: 'boolean', - description: 'Whether the content was applied successfully.', - }, - }, - required: ['success', 'message'], - }, - }, - edit_workflow: { - parameters: { - type: 'object', - properties: { - operations: { - type: 'array', - description: 'Array of edit operations', - items: { - type: 'object', - properties: { - block_id: { - type: 'string', - description: - 'Block ID for the operation. For add operations, this will be the desired ID for the new block.', + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Optional operation metadata such as file id, file name, size, and content type." + }, + "message": { + "type": "string", + "description": "Human-readable summary of the outcome." + }, + "success": { + "type": "boolean", + "description": "Whether the content was applied successfully." + } + }, + "required": [ + "success", + "message" + ] + }, + }, + ["edit_workflow"]: { + parameters: { + "type": "object", + "properties": { + "operations": { + "type": "array", + "description": "Array of edit operations", + "items": { + "type": "object", + "properties": { + "block_id": { + "type": "string", + "description": "Block ID for the operation. For add operations, this will be the desired ID for the new block." 
}, - operation_type: { - type: 'string', - description: 'Type of operation to perform', - enum: ['add', 'edit', 'delete', 'insert_into_subflow', 'extract_from_subflow'], + "operation_type": { + "type": "string", + "description": "Type of operation to perform", + "enum": [ + "add", + "edit", + "delete", + "insert_into_subflow", + "extract_from_subflow" + ] }, - params: { - type: 'object', - description: - 'Parameters for the operation. \nFor edit: {"inputs": {"temperature": 0.5}} NOT {"subBlocks": {"temperature": {"value": 0.5}}}\nFor add: {"type": "agent", "name": "My Agent", "inputs": {"model": "gpt-4o"}}\nFor delete: {} (empty object)', - }, - }, - required: ['operation_type', 'block_id', 'params'], - }, - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', + "params": { + "type": "object", + "description": "Parameters for the operation. \nFor edit: {\"inputs\": {\"temperature\": 0.5}} NOT {\"subBlocks\": {\"temperature\": {\"value\": 0.5}}}\nFor add: {\"type\": \"agent\", \"name\": \"My Agent\", \"inputs\": {\"model\": \"gpt-4o\"}}\nFor delete: {} (empty object)" + } + }, + "required": [ + "operation_type", + "block_id", + "params" + ] + } }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID to edit. If not provided, uses the current workflow in context." + } }, - required: ['operations'], + "required": [ + "operations" + ] }, resultSchema: undefined, }, - file: { - parameters: { - type: 'object', - }, + ["file"]: { + parameters: { + "type": "object" + }, + resultSchema: undefined, + }, + ["function_execute"]: { + parameters: { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME." 
+ }, + "inputFiles": { + "type": "array", + "description": "Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: [\"wf_123\"]", + "items": { + "type": "string" + } + }, + "inputTables": { + "type": "array", + "description": "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: [\"tbl_abc123\"]", + "items": { + "type": "string" + } + }, + "language": { + "type": "string", + "description": "Execution language.", + "enum": [ + "javascript", + "python", + "shell" + ] + }, + "outputFormat": { + "type": "string", + "description": "Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.", + "enum": [ + "json", + "csv", + "txt", + "md", + "html" + ] + }, + "outputMimeType": { + "type": "string", + "description": "MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files." + }, + "outputPath": { + "type": "string", + "description": "Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like \"files/result.json\" — nested paths are not supported." + }, + "outputSandboxPath": { + "type": "string", + "description": "Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath." + }, + "outputTable": { + "type": "string", + "description": "Table ID to overwrite with the code's return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. 
Example: \"tbl_abc123\"" + } + }, + "required": [ + "code" + ] + }, resultSchema: undefined, }, - function_execute: { - parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME.', - }, - inputFiles: { - type: 'array', - description: - 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: ["wf_123"]', - items: { - type: 'string', - }, - }, - inputTables: { - type: 'array', - description: - 'Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: ["tbl_abc123"]', - items: { - type: 'string', - }, - }, - language: { - type: 'string', - description: 'Execution language.', - enum: ['javascript', 'python', 'shell'], - }, - outputFormat: { - type: 'string', - description: - 'Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.', - enum: ['json', 'csv', 'txt', 'md', 'html'], - }, - outputMimeType: { - type: 'string', - description: - 'MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files.', - }, - outputPath: { - type: 'string', - description: - 'Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like "files/result.json" — nested paths are not supported.', - }, - outputSandboxPath: { - type: 'string', - description: - 'Path to a file created inside the sandbox that should be exported to the workspace. 
Use together with outputPath.', - }, - outputTable: { - type: 'string', - description: - 'Table ID to overwrite with the code\'s return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. Example: "tbl_abc123"', - }, - }, - required: ['code'], + ["generate_api_key"]: { + parameters: { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A descriptive name for the API key (e.g., 'production-key', 'dev-testing')." + }, + "workspaceId": { + "type": "string", + "description": "Optional workspace ID. Defaults to user's default workspace." + } + }, + "required": [ + "name" + ] }, resultSchema: undefined, }, - generate_api_key: { + ["generate_image"]: { parameters: { - type: 'object', - properties: { - name: { - type: 'string', - description: - "A descriptive name for the API key (e.g., 'production-key', 'dev-testing').", - }, - workspaceId: { - type: 'string', - description: "Optional workspace ID. Defaults to user's default workspace.", - }, + "type": "object", + "properties": { + "aspectRatio": { + "type": "string", + "description": "Aspect ratio for the generated image.", + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ] + }, + "fileName": { + "type": "string", + "description": "Output file name. Defaults to \"generated-image.png\". Workspace files are flat, so pass a plain file name, not a nested path." + }, + "overwriteFileId": { + "type": "string", + "description": "If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like \"image (1).png\". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")." 
+ }, + "prompt": { + "type": "string", + "description": "Detailed text description of the image to generate, or editing instructions when used with editFileId." + }, + "referenceFileIds": { + "type": "array", + "description": "File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.", + "items": { + "type": "string" + } + } }, - required: ['name'], + "required": [ + "prompt" + ] }, resultSchema: undefined, }, - generate_image: { + ["generate_visualization"]: { parameters: { - type: 'object', - properties: { - aspectRatio: { - type: 'string', - description: 'Aspect ratio for the generated image.', - enum: ['1:1', '16:9', '9:16', '4:3', '3:4'], - }, - fileName: { - type: 'string', - description: - 'Output file name. Defaults to "generated-image.png". Workspace files are flat, so pass a plain file name, not a nested path.', + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output." }, - overwriteFileId: { - type: 'string', - description: - 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like "image (1).png". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', + "fileName": { + "type": "string", + "description": "Output file name. 
Defaults to \"chart.png\". Workspace files are flat, so pass a plain file name, not a nested path." }, - prompt: { - type: 'string', - description: - 'Detailed text description of the image to generate, or editing instructions when used with editFileId.', + "inputFiles": { + "type": "array", + "description": "Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}.", + "items": { + "type": "string" + } }, - referenceFileIds: { - type: 'array', - description: - 'File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.', - items: { - type: 'string', - }, + "inputTables": { + "type": "array", + "description": "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", + "items": { + "type": "string" + } }, + "overwriteFileId": { + "type": "string", + "description": "If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like \"chart (1).png\". The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")." 
+ } }, - required: ['prompt'], + "required": [ + "code" + ] }, resultSchema: undefined, }, - generate_visualization: { + ["get_block_outputs"]: { parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output.", - }, - fileName: { - type: 'string', - description: - 'Output file name. Defaults to "chart.png". Workspace files are flat, so pass a plain file name, not a nested path.', - }, - inputFiles: { - type: 'array', - description: - 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}.', - items: { - type: 'string', - }, - }, - inputTables: { - type: 'array', - description: - "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", - items: { - type: 'string', - }, + "type": "object", + "properties": { + "blockIds": { + "type": "array", + "description": "Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.", + "items": { + "type": "string" + } }, - overwriteFileId: { - type: 'string', - description: - 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like "chart (1).png". 
The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', - }, - }, - required: ['code'], + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." + } + } }, resultSchema: undefined, }, - get_block_outputs: { + ["get_block_upstream_references"]: { parameters: { - type: 'object', - properties: { - blockIds: { - type: 'array', - description: - 'Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.', - items: { - type: 'string', - }, - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If not provided, uses the current workflow in context.', + "type": "object", + "properties": { + "blockIds": { + "type": "array", + "description": "Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.", + "items": { + "type": "string" + } }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." + } }, + "required": [ + "blockIds" + ] }, resultSchema: undefined, }, - get_block_upstream_references: { + ["get_deployed_workflow_state"]: { parameters: { - type: 'object', - properties: { - blockIds: { - type: 'array', - description: - 'Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.', - items: { - type: 'string', - }, - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, - required: ['blockIds'], + "type": "object", + "properties": { + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." 
+ } + } }, resultSchema: undefined, }, - get_deployed_workflow_state: { + ["get_deployment_version"]: { parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If not provided, uses the current workflow in context.', + "type": "object", + "properties": { + "version": { + "type": "number", + "description": "The deployment version number" }, + "workflowId": { + "type": "string", + "description": "The workflow ID" + } }, + "required": [ + "workflowId", + "version" + ] }, resultSchema: undefined, }, - get_deployment_version: { + ["get_execution_summary"]: { parameters: { - type: 'object', - properties: { - version: { - type: 'number', - description: 'The deployment version number', + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Max number of executions to return (default: 10, max: 20)." }, - workflowId: { - type: 'string', - description: 'The workflow ID', + "status": { + "type": "string", + "description": "Filter by status: 'success', 'error', or 'all' (default: 'all').", + "enum": [ + "success", + "error", + "all" + ] }, - }, - required: ['workflowId', 'version'], - }, - resultSchema: undefined, - }, - get_execution_summary: { - parameters: { - type: 'object', - properties: { - limit: { - type: 'number', - description: 'Max number of executions to return (default: 10, max: 20).', - }, - status: { - type: 'string', - description: "Filter by status: 'success', 'error', or 'all' (default: 'all').", - enum: ['success', 'error', 'all'], - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If omitted, returns executions across all workflows in the workspace.', - }, - workspaceId: { - type: 'string', - description: 'Workspace ID to scope executions to.', + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If omitted, returns executions across all workflows in the workspace." 
}, + "workspaceId": { + "type": "string", + "description": "Workspace ID to scope executions to." + } }, - required: ['workspaceId'], + "required": [ + "workspaceId" + ] }, resultSchema: undefined, }, - get_job_logs: { + ["get_job_logs"]: { parameters: { - type: 'object', - properties: { - executionId: { - type: 'string', - description: 'Optional execution ID for a specific run.', - }, - includeDetails: { - type: 'boolean', - description: 'Include tool calls, outputs, and cost details.', + "type": "object", + "properties": { + "executionId": { + "type": "string", + "description": "Optional execution ID for a specific run." }, - jobId: { - type: 'string', - description: 'The job (schedule) ID to get logs for.', + "includeDetails": { + "type": "boolean", + "description": "Include tool calls, outputs, and cost details." }, - limit: { - type: 'number', - description: 'Max number of entries (default: 3, max: 5)', + "jobId": { + "type": "string", + "description": "The job (schedule) ID to get logs for." 
}, + "limit": { + "type": "number", + "description": "Max number of entries (default: 3, max: 5)" + } }, - required: ['jobId'], + "required": [ + "jobId" + ] }, resultSchema: undefined, }, - get_page_contents: { + ["get_page_contents"]: { parameters: { - type: 'object', - properties: { - include_highlights: { - type: 'boolean', - description: 'Include key highlights (default false)', + "type": "object", + "properties": { + "include_highlights": { + "type": "boolean", + "description": "Include key highlights (default false)" }, - include_summary: { - type: 'boolean', - description: 'Include AI-generated summary (default false)', + "include_summary": { + "type": "boolean", + "description": "Include AI-generated summary (default false)" }, - include_text: { - type: 'boolean', - description: 'Include full page text (default true)', - }, - urls: { - type: 'array', - description: 'URLs to get content from (max 10)', - items: { - type: 'string', - }, + "include_text": { + "type": "boolean", + "description": "Include full page text (default true)" }, + "urls": { + "type": "array", + "description": "URLs to get content from (max 10)", + "items": { + "type": "string" + } + } }, - required: ['urls'], + "required": [ + "urls" + ] }, resultSchema: undefined, }, - get_platform_actions: { + ["get_platform_actions"]: { parameters: { - type: 'object', - properties: {}, + "type": "object", + "properties": {} }, resultSchema: undefined, }, - get_workflow_data: { + ["get_workflow_data"]: { parameters: { - type: 'object', - properties: { - data_type: { - type: 'string', - description: 'The type of workflow data to retrieve', - enum: ['global_variables', 'custom_tools', 'mcp_tools', 'files'], - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. 
If not provided, uses the current workflow in context.', + "type": "object", + "properties": { + "data_type": { + "type": "string", + "description": "The type of workflow data to retrieve", + "enum": [ + "global_variables", + "custom_tools", + "mcp_tools", + "files" + ] }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." + } }, - required: ['data_type'], + "required": [ + "data_type" + ] }, resultSchema: undefined, }, - get_workflow_logs: { + ["get_workflow_logs"]: { parameters: { - type: 'object', - properties: { - executionId: { - type: 'string', - description: - 'Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first.', + "type": "object", + "properties": { + "executionId": { + "type": "string", + "description": "Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first." }, - includeDetails: { - type: 'boolean', - description: 'Include detailed info', + "includeDetails": { + "type": "boolean", + "description": "Include detailed info" }, - limit: { - type: 'number', - description: 'Max number of entries (hard limit: 3)', + "limit": { + "type": "number", + "description": "Max number of entries (hard limit: 3)" }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If not provided, uses the current workflow in context.', - }, - }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." + } + } }, resultSchema: undefined, }, - glob: { + ["glob"]: { parameters: { - type: 'object', - properties: { - pattern: { - type: 'string', - description: - 'Glob pattern to match file paths. Supports * (any segment) and ** (any depth).', - }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "workflow configs" or "knowledge bases", not a full sentence like "Finding workflow configs".', + "type": "object", + "properties": { + "pattern": { + "type": "string", + "description": "Glob pattern to match file paths. Supports * (any segment) and ** (any depth)." }, + "toolTitle": { + "type": "string", + "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"workflow configs\" or \"knowledge bases\", not a full sentence like \"Finding workflow configs\"." + } }, - required: ['pattern', 'toolTitle'], + "required": [ + "pattern", + "toolTitle" + ] }, resultSchema: undefined, }, - grep: { + ["grep"]: { parameters: { - type: 'object', - properties: { - context: { - type: 'number', - description: - "Number of lines to show before and after each match. Only applies to output_mode 'content'.", + "type": "object", + "properties": { + "context": { + "type": "number", + "description": "Number of lines to show before and after each match. Only applies to output_mode 'content'." }, - ignoreCase: { - type: 'boolean', - description: 'Case insensitive search (default false).', + "ignoreCase": { + "type": "boolean", + "description": "Case insensitive search (default false)." }, - lineNumbers: { - type: 'boolean', - description: - "Include line numbers in output (default true). Only applies to output_mode 'content'.", + "lineNumbers": { + "type": "boolean", + "description": "Include line numbers in output (default true). Only applies to output_mode 'content'." }, - maxResults: { - type: 'number', - description: 'Maximum number of matches to return (default 50).', + "maxResults": { + "type": "number", + "description": "Maximum number of matches to return (default 50)." 
}, - output_mode: { - type: 'string', - description: - "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", - enum: ['content', 'files_with_matches', 'count'], + "output_mode": { + "type": "string", + "description": "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", + "enum": [ + "content", + "files_with_matches", + "count" + ] }, - path: { - type: 'string', - description: - "Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/').", + "path": { + "type": "string", + "description": "Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/')." }, - pattern: { - type: 'string', - description: 'Regex pattern to search for in file contents.', - }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like "Slack integrations" or "deployed workflows", not a full sentence like "Searching for Slack integrations".', + "pattern": { + "type": "string", + "description": "Regex pattern to search for in file contents." }, + "toolTitle": { + "type": "string", + "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"Slack integrations\" or \"deployed workflows\", not a full sentence like \"Searching for Slack integrations\"." 
+ } }, - required: ['pattern', 'toolTitle'], + "required": [ + "pattern", + "toolTitle" + ] }, resultSchema: undefined, }, - job: { + ["job"]: { parameters: { - properties: { - request: { - description: 'What job action is needed.', - type: 'string', - }, + "properties": { + "request": { + "description": "What job action is needed.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - knowledge: { + ["knowledge"]: { parameters: { - properties: { - request: { - description: 'What knowledge base action is needed.', - type: 'string', - }, + "properties": { + "request": { + "description": "What knowledge base action is needed.", + "type": "string" + } }, - required: ['request'], - type: 'object', - }, - resultSchema: undefined, - }, - knowledge_base: { - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: 'Arguments for the operation', - properties: { - apiKey: { - type: 'string', - description: - 'API key for API-key-based connectors (required when connector auth mode is apiKey)', - }, - chunkingConfig: { - type: 'object', - description: "Chunking configuration (optional for 'create')", - properties: { - maxSize: { - type: 'number', - description: 'Maximum chunk size (100-4000, default: 1024)', - default: 1024, - }, - minSize: { - type: 'number', - description: 'Minimum chunk size (1-2000, default: 1)', - default: 1, + "required": [ + "request" + ], + "type": "object" + }, + resultSchema: undefined, + }, + ["knowledge_base"]: { + parameters: { + "type": "object", + "properties": { + "args": { + "type": "object", + "description": "Arguments for the operation", + "properties": { + "apiKey": { + "type": "string", + "description": "API key for API-key-based connectors (required when connector auth mode is apiKey)" + }, + "chunkingConfig": { + "type": "object", + "description": "Chunking configuration (optional for 'create')", + 
"properties": { + "maxSize": { + "type": "number", + "description": "Maximum chunk size (100-4000, default: 1024)", + "default": 1024 }, - overlap: { - type: 'number', - description: 'Overlap between chunks (0-500, default: 200)', - default: 200, + "minSize": { + "type": "number", + "description": "Minimum chunk size (1-2000, default: 1)", + "default": 1 }, - }, - }, - connectorId: { - type: 'string', - description: - 'Connector ID (required for update_connector, delete_connector, sync_connector)', - }, - connectorStatus: { - type: 'string', - description: 'Connector status (optional for update_connector)', - enum: ['active', 'paused'], - }, - connectorType: { - type: 'string', - description: - "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema.", - }, - credentialId: { - type: 'string', - description: - 'OAuth credential ID from environment/credentials.json (required for OAuth connectors)', - }, - description: { - type: 'string', - description: "Description of the knowledge base (optional for 'create')", - }, - disabledTagIds: { - type: 'array', - description: - 'Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema.', - }, - documentId: { - type: 'string', - description: 'Document ID (required for update_document)', - }, - documentIds: { - type: 'array', - description: 'Document IDs (for batch delete_document)', - items: { - type: 'string', - }, - }, - enabled: { - type: 'boolean', - description: 'Enable/disable a document (optional for update_document)', - }, - fileIds: { - type: 'array', - description: - 'Canonical workspace file IDs to add as documents (for add_file). 
Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', - items: { - type: 'string', - }, - }, - filename: { - type: 'string', - description: 'New filename for a document (optional for update_document)', - }, - knowledgeBaseId: { - type: 'string', - description: - 'Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)', - }, - knowledgeBaseIds: { - type: 'array', - description: 'Knowledge base IDs (for batch delete)', - items: { - type: 'string', - }, - }, - name: { - type: 'string', - description: "Name of the knowledge base (required for 'create')", - }, - query: { - type: 'string', - description: "Search query text (required for 'query')", - }, - sourceConfig: { - type: 'object', - description: - 'Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json', - }, - syncIntervalMinutes: { - type: 'number', - description: - 'Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440', - default: 1440, - }, - tagDefinitionId: { - type: 'string', - description: 'Tag definition ID (required for update_tag, delete_tag)', - }, - tagDisplayName: { - type: 'string', - description: - 'Display name for the tag (required for create_tag, optional for update_tag)', - }, - tagFieldType: { - type: 'string', - description: - 'Field type: text, number, date, boolean (optional for create_tag, defaults to text)', - enum: ['text', 'number', 'date', 'boolean'], - }, - topK: { - type: 'number', - description: 'Number of results to return (1-50, default: 5)', - default: 5, - }, - workspaceId: { - type: 'string', - description: "Workspace ID (required for 'create', optional filter for 'list')", - }, - }, - }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: [ - 'create', - 'get', - 'query', - 'add_file', - 'update', - 'delete', - 'delete_document', - 'update_document', - 'list_tags', - 'create_tag', - 'update_tag', - 'delete_tag', - 'get_tag_usage', - 'add_connector', - 'update_connector', - 'delete_connector', - 'sync_connector', - ], - }, - }, - required: ['operation', 'args'], + "overlap": { + "type": "number", + "description": "Overlap between chunks (0-500, default: 200)", + "default": 200 + } + } + }, + "connectorId": { + "type": "string", + "description": "Connector ID (required for update_connector, delete_connector, sync_connector)" + }, + "connectorStatus": { + "type": "string", + "description": "Connector status (optional for update_connector)", + "enum": [ + "active", + "paused" + ] + }, + "connectorType": { + "type": "string", + "description": "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema." 
+ }, + "credentialId": { + "type": "string", + "description": "OAuth credential ID from environment/credentials.json (required for OAuth connectors)" + }, + "description": { + "type": "string", + "description": "Description of the knowledge base (optional for 'create')" + }, + "disabledTagIds": { + "type": "array", + "description": "Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema." + }, + "documentId": { + "type": "string", + "description": "Document ID (required for update_document)" + }, + "documentIds": { + "type": "array", + "description": "Document IDs (for batch delete_document)", + "items": { + "type": "string" + } + }, + "enabled": { + "type": "boolean", + "description": "Enable/disable a document (optional for update_document)" + }, + "fileIds": { + "type": "array", + "description": "Canonical workspace file IDs to add as documents (for add_file). Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\").", + "items": { + "type": "string" + } + }, + "filename": { + "type": "string", + "description": "New filename for a document (optional for update_document)" + }, + "knowledgeBaseId": { + "type": "string", + "description": "Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)" + }, + "knowledgeBaseIds": { + "type": "array", + "description": "Knowledge base IDs (for batch delete)", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the knowledge base (required for 'create')" + }, + "query": { + "type": "string", + "description": "Search query text (required for 'query')" + }, + "sourceConfig": { + "type": "object", + "description": "Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json" + }, + "syncIntervalMinutes": { + "type": "number", + "description": "Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual 
only). Default: 1440", + "default": 1440 + }, + "tagDefinitionId": { + "type": "string", + "description": "Tag definition ID (required for update_tag, delete_tag)" + }, + "tagDisplayName": { + "type": "string", + "description": "Display name for the tag (required for create_tag, optional for update_tag)" + }, + "tagFieldType": { + "type": "string", + "description": "Field type: text, number, date, boolean (optional for create_tag, defaults to text)", + "enum": [ + "text", + "number", + "date", + "boolean" + ] + }, + "topK": { + "type": "number", + "description": "Number of results to return (1-50, default: 5)", + "default": 5 + }, + "workspaceId": { + "type": "string", + "description": "Workspace ID (required for 'create', optional filter for 'list')" + } + } + }, + "operation": { + "type": "string", + "description": "The operation to perform", + "enum": [ + "create", + "get", + "query", + "add_file", + "update", + "delete", + "delete_document", + "update_document", + "list_tags", + "create_tag", + "update_tag", + "delete_tag", + "get_tag_usage", + "add_connector", + "update_connector", + "delete_connector", + "sync_connector" + ] + } + }, + "required": [ + "operation", + "args" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: 'Operation-specific result payload.', + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Operation-specific result payload." }, - message: { - type: 'string', - description: 'Human-readable outcome summary.', - }, - success: { - type: 'boolean', - description: 'Whether the operation succeeded.', + "message": { + "type": "string", + "description": "Human-readable outcome summary." }, + "success": { + "type": "boolean", + "description": "Whether the operation succeeded." 
+ } }, - required: ['success', 'message'], + "required": [ + "success", + "message" + ] }, }, - list_folders: { + ["list_folders"]: { parameters: { - type: 'object', - properties: { - workspaceId: { - type: 'string', - description: 'Optional workspace ID to list folders for.', - }, - }, + "type": "object", + "properties": { + "workspaceId": { + "type": "string", + "description": "Optional workspace ID to list folders for." + } + } }, resultSchema: undefined, }, - list_user_workspaces: { + ["list_user_workspaces"]: { parameters: { - type: 'object', - properties: {}, + "type": "object", + "properties": {} }, resultSchema: undefined, }, - list_workspace_mcp_servers: { + ["list_workspace_mcp_servers"]: { parameters: { - type: 'object', - properties: { - workspaceId: { - type: 'string', - description: 'Workspace ID (defaults to current workspace)', - }, - }, + "type": "object", + "properties": { + "workspaceId": { + "type": "string", + "description": "Workspace ID (defaults to current workspace)" + } + } }, resultSchema: undefined, }, - manage_credential: { + ["manage_credential"]: { parameters: { - type: 'object', - properties: { - credentialId: { - type: 'string', - description: 'The credential ID (required for rename)', + "type": "object", + "properties": { + "credentialId": { + "type": "string", + "description": "The credential ID (required for rename)" }, - credentialIds: { - type: 'array', - description: 'Array of credential IDs (for batch delete)', - items: { - type: 'string', - }, - }, - displayName: { - type: 'string', - description: 'New display name (required for rename)', + "credentialIds": { + "type": "array", + "description": "Array of credential IDs (for batch delete)", + "items": { + "type": "string" + } }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: ['rename', 'delete'], + "displayName": { + "type": "string", + "description": "New display name (required for rename)" }, + "operation": { + "type": "string", + 
"description": "The operation to perform", + "enum": [ + "rename", + "delete" + ] + } }, - required: ['operation'], + "required": [ + "operation" + ] }, resultSchema: undefined, }, - manage_custom_tool: { + ["manage_custom_tool"]: { parameters: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces.', + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces." }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], + "operation": { + "type": "string", + "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", + "enum": [ + "add", + "edit", + "delete", + "list" + ] }, - schema: { - type: 'object', - description: 'The tool schema in OpenAI function calling format (required for add).', - properties: { - function: { - type: 'object', - description: 'The function definition', - properties: { - description: { - type: 'string', - description: 'What the function does', + "schema": { + "type": "object", + "description": "The tool schema in OpenAI function calling format (required for add).", + "properties": { + "function": { + "type": "object", + "description": "The function definition", + "properties": { + "description": { + "type": "string", + "description": "What the function does" }, - name: { - type: 'string', - description: 'The function name (camelCase)', + "name": { + "type": "string", + "description": "The function name (camelCase)" }, - parameters: { - type: 'object', - description: 'The function parameters schema', - properties: 
{ - properties: { - type: 'object', - description: 'Parameter definitions as key-value pairs', - }, - required: { - type: 'array', - description: 'Array of required parameter names', - items: { - type: 'string', - }, + "parameters": { + "type": "object", + "description": "The function parameters schema", + "properties": { + "properties": { + "type": "object", + "description": "Parameter definitions as key-value pairs" }, - type: { - type: 'string', - description: "Must be 'object'", + "required": { + "type": "array", + "description": "Array of required parameter names", + "items": { + "type": "string" + } }, + "type": { + "type": "string", + "description": "Must be 'object'" + } }, - required: ['type', 'properties'], - }, - }, - required: ['name', 'parameters'], - }, - type: { - type: 'string', - description: "Must be 'function'", - }, - }, - required: ['type', 'function'], - }, - toolId: { - type: 'string', - description: - "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'.", - }, - toolIds: { - type: 'array', - description: 'Array of custom tool IDs (for batch delete)', - items: { - type: 'string', - }, - }, - }, - required: ['operation'], - }, - resultSchema: undefined, - }, - manage_job: { - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: - 'Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. 
For list: no args needed.', - properties: { - cron: { - type: 'string', - description: 'Cron expression for recurring jobs', - }, - jobId: { - type: 'string', - description: 'Job ID (required for get, update)', - }, - jobIds: { - type: 'array', - description: 'Array of job IDs (for batch delete)', - items: { - type: 'string', + "required": [ + "type", + "properties" + ] + } }, - }, - lifecycle: { - type: 'string', - description: - "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called.", - }, - maxRuns: { - type: 'integer', - description: 'Max executions before auto-completing. Safety limit.', - }, - prompt: { - type: 'string', - description: 'The prompt to execute when the job fires', - }, - status: { - type: 'string', - description: 'Job status: active, paused', - }, - successCondition: { - type: 'string', - description: - 'What must happen for the job to be considered complete (until_complete lifecycle).', - }, - time: { - type: 'string', - description: 'ISO 8601 datetime for one-time jobs or cron start time', - }, - timezone: { - type: 'string', - description: 'IANA timezone (e.g. America/New_York). Defaults to UTC.', - }, - title: { - type: 'string', - description: "Short descriptive title for the job (e.g. 'Email Poller')", - }, - }, - }, - operation: { - type: 'string', - description: 'The operation to perform: create, list, get, update, delete', - enum: ['create', 'list', 'get', 'update', 'delete'], - }, - }, - required: ['operation'], - }, - resultSchema: undefined, - }, - manage_mcp_tool: { - parameters: { - type: 'object', - properties: { - config: { - type: 'object', - description: 'Required for add and edit. 
The MCP server configuration.', - properties: { - enabled: { - type: 'boolean', - description: 'Whether the server is enabled (default: true)', - }, - headers: { - type: 'object', - description: 'Optional HTTP headers to send with requests (key-value pairs)', - }, - name: { - type: 'string', - description: 'Display name for the MCP server', - }, - timeout: { - type: 'number', - description: 'Request timeout in milliseconds (default: 30000)', - }, - transport: { - type: 'string', - description: "Transport protocol: 'streamable-http' or 'sse'", - enum: ['streamable-http', 'sse'], - default: 'streamable-http', - }, - url: { - type: 'string', - description: 'The MCP server endpoint URL (required for add)', - }, + "required": [ + "name", + "parameters" + ] + }, + "type": { + "type": "string", + "description": "Must be 'function'" + } }, + "required": [ + "type", + "function" + ] + }, + "toolId": { + "type": "string", + "description": "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'." + }, + "toolIds": { + "type": "array", + "description": "Array of custom tool IDs (for batch delete)", + "items": { + "type": "string" + } + } + }, + "required": [ + "operation" + ] + }, + resultSchema: undefined, + }, + ["manage_job"]: { + parameters: { + "type": "object", + "properties": { + "args": { + "type": "object", + "description": "Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. 
For list: no args needed.", + "properties": { + "cron": { + "type": "string", + "description": "Cron expression for recurring jobs" + }, + "jobId": { + "type": "string", + "description": "Job ID (required for get, update)" + }, + "jobIds": { + "type": "array", + "description": "Array of job IDs (for batch delete)", + "items": { + "type": "string" + } + }, + "lifecycle": { + "type": "string", + "description": "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called." + }, + "maxRuns": { + "type": "integer", + "description": "Max executions before auto-completing. Safety limit." + }, + "prompt": { + "type": "string", + "description": "The prompt to execute when the job fires" + }, + "status": { + "type": "string", + "description": "Job status: active, paused" + }, + "successCondition": { + "type": "string", + "description": "What must happen for the job to be considered complete (until_complete lifecycle)." + }, + "time": { + "type": "string", + "description": "ISO 8601 datetime for one-time jobs or cron start time" + }, + "timezone": { + "type": "string", + "description": "IANA timezone (e.g. America/New_York). Defaults to UTC." + }, + "title": { + "type": "string", + "description": "Short descriptive title for the job (e.g. 'Email Poller')" + } + } + }, + "operation": { + "type": "string", + "description": "The operation to perform: create, list, get, update, delete", + "enum": [ + "create", + "list", + "get", + "update", + "delete" + ] + } + }, + "required": [ + "operation" + ] + }, + resultSchema: undefined, + }, + ["manage_mcp_tool"]: { + parameters: { + "type": "object", + "properties": { + "config": { + "type": "object", + "description": "Required for add and edit. 
The MCP server configuration.", + "properties": { + "enabled": { + "type": "boolean", + "description": "Whether the server is enabled (default: true)" + }, + "headers": { + "type": "object", + "description": "Optional HTTP headers to send with requests (key-value pairs)" + }, + "name": { + "type": "string", + "description": "Display name for the MCP server" + }, + "timeout": { + "type": "number", + "description": "Request timeout in milliseconds (default: 30000)" + }, + "transport": { + "type": "string", + "description": "Transport protocol: 'streamable-http' or 'sse'", + "enum": [ + "streamable-http", + "sse" + ], + "default": "streamable-http" + }, + "url": { + "type": "string", + "description": "The MCP server endpoint URL (required for add)" + } + } + }, + "operation": { + "type": "string", + "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", + "enum": [ + "add", + "edit", + "delete", + "list" + ] + }, + "serverId": { + "type": "string", + "description": "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'." + } + }, + "required": [ + "operation" + ] + }, + resultSchema: undefined, + }, + ["manage_skill"]: { + parameters: { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "Markdown instructions for the skill. Required for add, optional for edit." + }, + "description": { + "type": "string", + "description": "Short description of the skill. Required for add, optional for edit." + }, + "name": { + "type": "string", + "description": "Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit." + }, + "operation": { + "type": "string", + "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", + "enum": [ + "add", + "edit", + "delete", + "list" + ] + }, + "skillId": { + "type": "string", + "description": "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. 
DO NOT PROVIDE if operation is 'add' or 'list'." + } + }, + "required": [ + "operation" + ] + }, + resultSchema: undefined, + }, + ["materialize_file"]: { + parameters: { + "type": "object", + "properties": { + "fileNames": { + "type": "array", + "description": "The names of the uploaded files to materialize (e.g. [\"report.pdf\", \"data.csv\"])", + "items": { + "type": "string" + } + }, + "knowledgeBaseId": { + "type": "string", + "description": "ID of an existing knowledge base to add the file to (only used with operation \"knowledge_base\"). If omitted, a new KB is created." + }, + "operation": { + "type": "string", + "description": "What to do with the file. \"save\" promotes it to files/. \"import\" imports a workflow JSON. \"table\" converts CSV/TSV/JSON to a table. \"knowledge_base\" saves and adds to a KB. Defaults to \"save\".", + "enum": [ + "save", + "import", + "table", + "knowledge_base" + ], + "default": "save" }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], - }, - serverId: { - type: 'string', - description: - "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'.", - }, - }, - required: ['operation'], - }, - resultSchema: undefined, - }, - manage_skill: { - parameters: { - type: 'object', - properties: { - content: { - type: 'string', - description: 'Markdown instructions for the skill. Required for add, optional for edit.', - }, - description: { - type: 'string', - description: 'Short description of the skill. Required for add, optional for edit.', - }, - name: { - type: 'string', - description: - "Skill name in kebab-case (e.g. 'my-skill'). 
Required for add, optional for edit.", - }, - operation: { - type: 'string', - description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", - enum: ['add', 'edit', 'delete', 'list'], - }, - skillId: { - type: 'string', - description: - "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. DO NOT PROVIDE if operation is 'add' or 'list'.", - }, - }, - required: ['operation'], - }, - resultSchema: undefined, - }, - materialize_file: { - parameters: { - type: 'object', - properties: { - fileNames: { - type: 'array', - description: - 'The names of the uploaded files to materialize (e.g. ["report.pdf", "data.csv"])', - items: { - type: 'string', - }, - }, - knowledgeBaseId: { - type: 'string', - description: - 'ID of an existing knowledge base to add the file to (only used with operation "knowledge_base"). If omitted, a new KB is created.', - }, - operation: { - type: 'string', - description: - 'What to do with the file. "save" promotes it to files/. "import" imports a workflow JSON. "table" converts CSV/TSV/JSON to a table. "knowledge_base" saves and adds to a KB. Defaults to "save".', - enum: ['save', 'import', 'table', 'knowledge_base'], - default: 'save', - }, - tableName: { - type: 'string', - description: - 'Custom name for the table (only used with operation "table"). Defaults to the file name without extension.', - }, + "tableName": { + "type": "string", + "description": "Custom name for the table (only used with operation \"table\"). Defaults to the file name without extension." + } }, - required: ['fileNames'], + "required": [ + "fileNames" + ] }, resultSchema: undefined, }, - move_folder: { + ["move_folder"]: { parameters: { - type: 'object', - properties: { - folderId: { - type: 'string', - description: 'The folder ID to move.', - }, - parentId: { - type: 'string', - description: - 'Target parent folder ID. 
Omit or pass empty string to move to workspace root.', + "type": "object", + "properties": { + "folderId": { + "type": "string", + "description": "The folder ID to move." }, + "parentId": { + "type": "string", + "description": "Target parent folder ID. Omit or pass empty string to move to workspace root." + } }, - required: ['folderId'], + "required": [ + "folderId" + ] }, resultSchema: undefined, }, - move_workflow: { + ["move_workflow"]: { parameters: { - type: 'object', - properties: { - folderId: { - type: 'string', - description: 'Target folder ID. Omit or pass empty string to move to workspace root.', - }, - workflowIds: { - type: 'array', - description: 'The workflow IDs to move.', - items: { - type: 'string', - }, + "type": "object", + "properties": { + "folderId": { + "type": "string", + "description": "Target folder ID. Omit or pass empty string to move to workspace root." }, + "workflowIds": { + "type": "array", + "description": "The workflow IDs to move.", + "items": { + "type": "string" + } + } }, - required: ['workflowIds'], + "required": [ + "workflowIds" + ] }, resultSchema: undefined, }, - oauth_get_auth_link: { + ["oauth_get_auth_link"]: { parameters: { - type: 'object', - properties: { - providerName: { - type: 'string', - description: - "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')", - }, + "type": "object", + "properties": { + "providerName": { + "type": "string", + "description": "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')" + } }, - required: ['providerName'], + "required": [ + "providerName" + ] }, resultSchema: undefined, }, - oauth_request_access: { + ["oauth_request_access"]: { parameters: { - type: 'object', - properties: { - providerName: { - type: 'string', - description: - "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')", - }, + "type": "object", + "properties": { + "providerName": { + "type": 
"string", + "description": "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')" + } }, - required: ['providerName'], + "required": [ + "providerName" + ] }, resultSchema: undefined, }, - open_resource: { + ["open_resource"]: { parameters: { - type: 'object', - properties: { - resources: { - type: 'array', - description: 'Array of resources to open. Each item must have type and id.', - items: { - type: 'object', - properties: { - id: { - type: 'string', - description: 'The resource ID.', - }, - type: { - type: 'string', - description: 'The resource type.', - enum: ['workflow', 'table', 'knowledgebase', 'file', 'log'], + "type": "object", + "properties": { + "resources": { + "type": "array", + "description": "Array of resources to open. Each item must have type and id.", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The resource ID." }, + "type": { + "type": "string", + "description": "The resource type.", + "enum": [ + "workflow", + "table", + "knowledgebase", + "file", + "log" + ] + } }, - required: ['type', 'id'], - }, - }, + "required": [ + "type", + "id" + ] + } + } }, - required: ['resources'], + "required": [ + "resources" + ] }, resultSchema: undefined, }, - read: { + ["read"]: { parameters: { - type: 'object', - properties: { - limit: { - type: 'number', - description: 'Maximum number of lines to read.', + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of lines to read." }, - offset: { - type: 'number', - description: 'Line offset to start reading from (0-indexed).', + "offset": { + "type": "number", + "description": "Line offset to start reading from (0-indexed)." }, - outputTable: { - type: 'string', - description: - 'Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: "tbl_abc123"', - }, - path: { - type: 'string', - description: - "Path to the file to read (e.g. 
'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", + "outputTable": { + "type": "string", + "description": "Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: \"tbl_abc123\"" }, + "path": { + "type": "string", + "description": "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json')." + } }, - required: ['path'], + "required": [ + "path" + ] }, resultSchema: undefined, }, - redeploy: { + ["redeploy"]: { parameters: { - type: 'object', - properties: { - workflowId: { - type: 'string', - description: 'Workflow ID to redeploy (required in workspace context)', - }, - }, + "type": "object", + "properties": { + "workflowId": { + "type": "string", + "description": "Workflow ID to redeploy (required in workspace context)" + } + } }, resultSchema: { - type: 'object', - properties: { - apiEndpoint: { - type: 'string', - description: 'Canonical workflow execution endpoint.', - }, - baseUrl: { - type: 'string', - description: 'Base URL used to construct deployment URLs.', - }, - deployedAt: { - type: 'string', - description: 'Deployment timestamp when the workflow is deployed.', - }, - deploymentConfig: { - type: 'object', - description: - 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', - }, - deploymentStatus: { - type: 'object', - description: - 'Structured per-surface deployment status keyed by surface name, such as api.', - }, - deploymentType: { - type: 'string', - description: - 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', - }, - examples: { - type: 'object', - description: - 'Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling.', - }, - isDeployed: { - type: 'boolean', - description: 'Whether the workflow API is currently deployed after this tool call.', - }, - version: { - type: 'number', - description: 'Deployment version for the current API deployment.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID that was deployed or undeployed.', - }, - }, - required: [ - 'workflowId', - 'isDeployed', - 'deploymentType', - 'deploymentStatus', - 'deploymentConfig', - 'examples', - ], - }, - }, - rename_file: { - parameters: { - type: 'object', - properties: { - fileId: { - type: 'string', - description: 'Canonical workspace file ID of the file to rename.', - }, - newName: { - type: 'string', - description: - 'New filename including extension, e.g. "draft_v2.md". Must not contain slashes.', - }, - }, - required: ['fileId', 'newName'], + "type": "object", + "properties": { + "apiEndpoint": { + "type": "string", + "description": "Canonical workflow execution endpoint." + }, + "baseUrl": { + "type": "string", + "description": "Base URL used to construct deployment URLs." + }, + "deployedAt": { + "type": "string", + "description": "Deployment timestamp when the workflow is deployed." + }, + "deploymentConfig": { + "type": "object", + "description": "Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details." + }, + "deploymentStatus": { + "type": "object", + "description": "Structured per-surface deployment status keyed by surface name, such as api." + }, + "deploymentType": { + "type": "string", + "description": "Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"." + }, + "examples": { + "type": "object", + "description": "Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling." 
+ }, + "isDeployed": { + "type": "boolean", + "description": "Whether the workflow API is currently deployed after this tool call." + }, + "version": { + "type": "number", + "description": "Deployment version for the current API deployment." + }, + "workflowId": { + "type": "string", + "description": "Workflow ID that was deployed or undeployed." + } + }, + "required": [ + "workflowId", + "isDeployed", + "deploymentType", + "deploymentStatus", + "deploymentConfig", + "examples" + ] + }, + }, + ["rename_file"]: { + parameters: { + "type": "object", + "properties": { + "fileId": { + "type": "string", + "description": "Canonical workspace file ID of the file to rename." + }, + "newName": { + "type": "string", + "description": "New filename including extension, e.g. \"draft_v2.md\". Must not contain slashes." + } + }, + "required": [ + "fileId", + "newName" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: 'Contains id and the new name.', + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Contains id and the new name." }, - message: { - type: 'string', - description: 'Human-readable outcome.', - }, - success: { - type: 'boolean', - description: 'Whether the rename succeeded.', + "message": { + "type": "string", + "description": "Human-readable outcome." }, + "success": { + "type": "boolean", + "description": "Whether the rename succeeded." + } }, - required: ['success', 'message'], + "required": [ + "success", + "message" + ] }, }, - rename_workflow: { + ["rename_workflow"]: { parameters: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'The new name for the workflow.', - }, - workflowId: { - type: 'string', - description: 'The workflow ID to rename.', + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The new name for the workflow." 
}, + "workflowId": { + "type": "string", + "description": "The workflow ID to rename." + } }, - required: ['workflowId', 'name'], + "required": [ + "workflowId", + "name" + ] }, resultSchema: undefined, }, - research: { + ["research"]: { parameters: { - properties: { - topic: { - description: 'The topic to research.', - type: 'string', - }, + "properties": { + "topic": { + "description": "The topic to research.", + "type": "string" + } }, - required: ['topic'], - type: 'object', + "required": [ + "topic" + ], + "type": "object" }, resultSchema: undefined, }, - respond: { + ["respond"]: { parameters: { - additionalProperties: true, - properties: { - output: { - description: - 'The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.', - type: 'string', - }, - success: { - description: 'Whether the task completed successfully', - type: 'boolean', + "additionalProperties": true, + "properties": { + "output": { + "description": "The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.", + "type": "string" }, - type: { - description: 'Optional logical result type override', - type: 'string', + "success": { + "description": "Whether the task completed successfully", + "type": "boolean" }, + "type": { + "description": "Optional logical result type override", + "type": "string" + } }, - required: ['output', 'success'], - type: 'object', + "required": [ + "output", + "success" + ], + "type": "object" }, resultSchema: undefined, }, - restore_resource: { + ["restore_resource"]: { parameters: { - type: 'object', - properties: { - id: { - type: 'string', - description: 'The canonical resource ID to restore.', - }, - type: { - type: 'string', - description: 'The resource type to restore.', - enum: ['workflow', 'table', 'file', 'knowledgebase', 'folder'], + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The canonical resource ID to restore." 
}, + "type": { + "type": "string", + "description": "The resource type to restore.", + "enum": [ + "workflow", + "table", + "file", + "knowledgebase", + "folder" + ] + } }, - required: ['type', 'id'], + "required": [ + "type", + "id" + ] }, resultSchema: undefined, }, - revert_to_version: { + ["revert_to_version"]: { parameters: { - type: 'object', - properties: { - version: { - type: 'number', - description: 'The deployment version number to revert to', - }, - workflowId: { - type: 'string', - description: 'The workflow ID', + "type": "object", + "properties": { + "version": { + "type": "number", + "description": "The deployment version number to revert to" }, + "workflowId": { + "type": "string", + "description": "The workflow ID" + } }, - required: ['workflowId', 'version'], + "required": [ + "workflowId", + "version" + ] }, resultSchema: undefined, }, - run: { + ["run"]: { parameters: { - properties: { - context: { - description: 'Pre-gathered context: workflow state, block IDs, input requirements.', - type: 'string', - }, - request: { - description: 'What to run or what logs to check.', - type: 'string', + "properties": { + "context": { + "description": "Pre-gathered context: workflow state, block IDs, input requirements.", + "type": "string" }, + "request": { + "description": "What to run or what logs to check.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - run_block: { + ["run_block"]: { parameters: { - type: 'object', - properties: { - blockId: { - type: 'string', - description: 'The block ID to run in isolation.', + "type": "object", + "properties": { + "blockId": { + "type": "string", + "description": "The block ID to run in isolation." }, - executionId: { - type: 'string', - description: - 'Optional execution ID to load the snapshot from. 
Uses latest execution if omitted.', + "executionId": { + "type": "string", + "description": "Optional execution ID to load the snapshot from. Uses latest execution if omitted." }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + "useDeployedState": { + "type": "boolean", + "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', + "workflowId": { + "type": "string", + "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." }, + "workflow_input": { + "type": "object", + "description": "JSON object with key-value mappings where each key is an input field name" + } }, - required: ['blockId'], + "required": [ + "blockId" + ] }, resultSchema: undefined, }, - run_from_block: { + ["run_from_block"]: { parameters: { - type: 'object', - properties: { - executionId: { - type: 'string', - description: - 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', - }, - startBlockId: { - type: 'string', - description: 'The block ID to start execution from.', + "type": "object", + "properties": { + "executionId": { + "type": "string", + "description": "Optional execution ID to load the snapshot from. Uses latest execution if omitted." }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + "startBlockId": { + "type": "string", + "description": "The block ID to start execution from." }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. 
If not provided, uses the current workflow in context.', + "useDeployedState": { + "type": "boolean", + "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', + "workflowId": { + "type": "string", + "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." }, + "workflow_input": { + "type": "object", + "description": "JSON object with key-value mappings where each key is an input field name" + } }, - required: ['startBlockId'], + "required": [ + "startBlockId" + ] }, resultSchema: undefined, }, - run_workflow: { + ["run_workflow"]: { parameters: { - type: 'object', - properties: { - triggerBlockId: { - type: 'string', - description: - 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', + "type": "object", + "properties": { + "triggerBlockId": { + "type": "string", + "description": "Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one." }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + "useDeployedState": { + "type": "boolean", + "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', - }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', + "workflowId": { + "type": "string", + "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." 
}, + "workflow_input": { + "type": "object", + "description": "JSON object with key-value mappings where each key is an input field name" + } }, - required: ['workflow_input'], + "required": [ + "workflow_input" + ] }, resultSchema: undefined, }, - run_workflow_until_block: { + ["run_workflow_until_block"]: { parameters: { - type: 'object', - properties: { - stopAfterBlockId: { - type: 'string', - description: 'The block ID to stop after. Execution halts once this block completes.', - }, - triggerBlockId: { - type: 'string', - description: - 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', + "type": "object", + "properties": { + "stopAfterBlockId": { + "type": "string", + "description": "The block ID to stop after. Execution halts once this block completes." }, - useDeployedState: { - type: 'boolean', - description: - 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + "triggerBlockId": { + "type": "string", + "description": "Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one." }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + "useDeployedState": { + "type": "boolean", + "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." }, - workflow_input: { - type: 'object', - description: 'JSON object with key-value mappings where each key is an input field name', + "workflowId": { + "type": "string", + "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." 
}, + "workflow_input": { + "type": "object", + "description": "JSON object with key-value mappings where each key is an input field name" + } }, - required: ['stopAfterBlockId'], + "required": [ + "stopAfterBlockId" + ] }, resultSchema: undefined, }, - scrape_page: { + ["scrape_page"]: { parameters: { - type: 'object', - properties: { - include_links: { - type: 'boolean', - description: 'Extract all links from the page (default false)', - }, - url: { - type: 'string', - description: 'The URL to scrape (must include https://)', + "type": "object", + "properties": { + "include_links": { + "type": "boolean", + "description": "Extract all links from the page (default false)" }, - wait_for: { - type: 'string', - description: 'CSS selector to wait for before scraping (for JS-heavy pages)', + "url": { + "type": "string", + "description": "The URL to scrape (must include https://)" }, + "wait_for": { + "type": "string", + "description": "CSS selector to wait for before scraping (for JS-heavy pages)" + } }, - required: ['url'], + "required": [ + "url" + ] }, resultSchema: undefined, }, - search_documentation: { + ["search_documentation"]: { parameters: { - type: 'object', - properties: { - query: { - type: 'string', - description: 'The search query', - }, - topK: { - type: 'number', - description: 'Number of results (max 10)', + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query" }, + "topK": { + "type": "number", + "description": "Number of results (max 10)" + } }, - required: ['query'], + "required": [ + "query" + ] }, resultSchema: undefined, }, - search_library_docs: { + ["search_library_docs"]: { parameters: { - type: 'object', - properties: { - library_name: { - type: 'string', - description: "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')", - }, - query: { - type: 'string', - description: 'The question or topic to find documentation for - be specific', + "type": "object", + "properties": 
{ + "library_name": { + "type": "string", + "description": "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')" }, - version: { - type: 'string', - description: "Specific version (optional, e.g., '14', 'v2')", + "query": { + "type": "string", + "description": "The question or topic to find documentation for - be specific" }, + "version": { + "type": "string", + "description": "Specific version (optional, e.g., '14', 'v2')" + } }, - required: ['library_name', 'query'], + "required": [ + "library_name", + "query" + ] }, resultSchema: undefined, }, - search_online: { + ["search_online"]: { parameters: { - type: 'object', - properties: { - category: { - type: 'string', - description: 'Filter by category', - enum: [ - 'news', - 'tweet', - 'github', - 'paper', - 'company', - 'research paper', - 'linkedin profile', - 'pdf', - 'personal site', - ], - }, - include_text: { - type: 'boolean', - description: 'Include page text content (default true)', + "type": "object", + "properties": { + "category": { + "type": "string", + "description": "Filter by category", + "enum": [ + "news", + "tweet", + "github", + "paper", + "company", + "research paper", + "linkedin profile", + "pdf", + "personal site" + ] }, - num_results: { - type: 'number', - description: 'Number of results (default 10, max 25)', + "include_text": { + "type": "boolean", + "description": "Include page text content (default true)" }, - query: { - type: 'string', - description: 'Natural language search query', + "num_results": { + "type": "number", + "description": "Number of results (default 10, max 25)" }, - toolTitle: { - type: 'string', - description: - 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "pricing changes" or "Slack webhook docs", not a full sentence like "Searching online for pricing changes".', + "query": { + "type": "string", + "description": "Natural language search query" }, + "toolTitle": { + "type": "string", + "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"pricing changes\" or \"Slack webhook docs\", not a full sentence like \"Searching online for pricing changes\"." + } }, - required: ['query', 'toolTitle'], + "required": [ + "query", + "toolTitle" + ] }, resultSchema: undefined, }, - search_patterns: { + ["search_patterns"]: { parameters: { - type: 'object', - properties: { - limit: { - type: 'integer', - description: 'Maximum number of unique pattern examples to return (defaults to 3).', - }, - queries: { - type: 'array', - description: - 'Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.', - items: { - type: 'string', - description: 'Example: "how to automate wealthbox meeting notes into follow-up tasks"', - }, + "type": "object", + "properties": { + "limit": { + "type": "integer", + "description": "Maximum number of unique pattern examples to return (defaults to 3)." }, + "queries": { + "type": "array", + "description": "Up to 3 descriptive strings explaining the workflow pattern(s) you need. 
Focus on intent and desired outcomes.", + "items": { + "type": "string", + "description": "Example: \"how to automate wealthbox meeting notes into follow-up tasks\"" + } + } }, - required: ['queries'], + "required": [ + "queries" + ] }, resultSchema: undefined, }, - set_block_enabled: { + ["set_block_enabled"]: { parameters: { - type: 'object', - properties: { - blockId: { - type: 'string', - description: 'The block ID whose enabled state should be changed.', - }, - enabled: { - type: 'boolean', - description: 'Set to true to enable the block, or false to disable it.', + "type": "object", + "properties": { + "blockId": { + "type": "string", + "description": "The block ID whose enabled state should be changed." }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', + "enabled": { + "type": "boolean", + "description": "Set to true to enable the block, or false to disable it." }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID to edit. If not provided, uses the current workflow in context." + } }, - required: ['blockId', 'enabled'], + "required": [ + "blockId", + "enabled" + ] }, - resultSchema: undefined, + resultSchema: undefined, }, - set_environment_variables: { + ["set_environment_variables"]: { parameters: { - type: 'object', - properties: { - scope: { - type: 'string', - description: - 'Whether to set workspace or personal environment variables. 
Defaults to workspace.', - enum: ['personal', 'workspace'], - default: 'workspace', - }, - variables: { - type: 'array', - description: 'List of env vars to set', - items: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'Variable name', - }, - value: { - type: 'string', - description: 'Variable value', - }, - }, - required: ['name', 'value'], - }, - }, - }, - required: ['variables'], - }, - resultSchema: undefined, - }, - set_global_workflow_variables: { - parameters: { - type: 'object', - properties: { - operations: { - type: 'array', - description: 'List of operations to apply', - items: { - type: 'object', - properties: { - name: { - type: 'string', + "type": "object", + "properties": { + "scope": { + "type": "string", + "description": "Whether to set workspace or personal environment variables. Defaults to workspace.", + "enum": [ + "personal", + "workspace" + ], + "default": "workspace" + }, + "variables": { + "type": "array", + "description": "List of env vars to set", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Variable name" }, - operation: { - type: 'string', - enum: ['add', 'delete', 'edit'], + "value": { + "type": "string", + "description": "Variable value" + } + }, + "required": [ + "name", + "value" + ] + } + } + }, + "required": [ + "variables" + ] + }, + resultSchema: undefined, + }, + ["set_global_workflow_variables"]: { + parameters: { + "type": "object", + "properties": { + "operations": { + "type": "array", + "description": "List of operations to apply", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" }, - type: { - type: 'string', - enum: ['plain', 'number', 'boolean', 'array', 'object'], + "operation": { + "type": "string", + "enum": [ + "add", + "delete", + "edit" + ] }, - value: { - type: 'string', + "type": { + "type": "string", + "enum": [ + "plain", + "number", + "boolean", + "array", + "object" + ] }, + "value": { + 
"type": "string" + } }, - required: ['operation', 'name', 'type', 'value'], - }, - }, - workflowId: { - type: 'string', - description: - 'Optional workflow ID. If not provided, uses the current workflow in context.', + "required": [ + "operation", + "name", + "type", + "value" + ] + } }, + "workflowId": { + "type": "string", + "description": "Optional workflow ID. If not provided, uses the current workflow in context." + } }, - required: ['operations'], + "required": [ + "operations" + ] }, resultSchema: undefined, }, - superagent: { + ["superagent"]: { parameters: { - properties: { - task: { - description: - "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", - type: 'string', - }, + "properties": { + "task": { + "description": "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", + "type": "string" + } }, - required: ['task'], - type: 'object', + "required": [ + "task" + ], + "type": "object" }, resultSchema: undefined, }, - table: { + ["table"]: { parameters: { - properties: { - request: { - description: 'What table action is needed.', - type: 'string', - }, + "properties": { + "request": { + "description": "What table action is needed.", + "type": "string" + } }, - required: ['request'], - type: 'object', + "required": [ + "request" + ], + "type": "object" }, resultSchema: undefined, }, - tool_search_tool_regex: { + ["tool_search_tool_regex"]: { parameters: { - properties: { - case_insensitive: { - description: 'Whether the regex should be case-insensitive (default true).', - type: 'boolean', + "properties": { + "case_insensitive": { + "description": "Whether the regex should be case-insensitive (default true).", + "type": "boolean" }, - max_results: { - description: 'Maximum number of tools to 
return (optional).', - type: 'integer', - }, - pattern: { - description: 'Regular expression to match tool names or descriptions.', - type: 'string', + "max_results": { + "description": "Maximum number of tools to return (optional).", + "type": "integer" }, + "pattern": { + "description": "Regular expression to match tool names or descriptions.", + "type": "string" + } }, - required: ['pattern'], - type: 'object', + "required": [ + "pattern" + ], + "type": "object" }, resultSchema: undefined, }, - update_job_history: { + ["update_job_history"]: { parameters: { - type: 'object', - properties: { - jobId: { - type: 'string', - description: 'The job ID.', - }, - summary: { - type: 'string', - description: - "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol').", - }, + "type": "object", + "properties": { + "jobId": { + "type": "string", + "description": "The job ID." + }, + "summary": { + "type": "string", + "description": "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol')." 
+ } }, - required: ['jobId', 'summary'], + "required": [ + "jobId", + "summary" + ] }, resultSchema: undefined, }, - update_workspace_mcp_server: { + ["update_workspace_mcp_server"]: { parameters: { - type: 'object', - properties: { - description: { - type: 'string', - description: 'New description for the server', - }, - isPublic: { - type: 'boolean', - description: 'Whether the server is publicly accessible', + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "New description for the server" }, - name: { - type: 'string', - description: 'New name for the server', + "isPublic": { + "type": "boolean", + "description": "Whether the server is publicly accessible" }, - serverId: { - type: 'string', - description: 'Required: the MCP server ID to update', + "name": { + "type": "string", + "description": "New name for the server" }, + "serverId": { + "type": "string", + "description": "Required: the MCP server ID to update" + } }, - required: ['serverId'], + "required": [ + "serverId" + ] }, resultSchema: undefined, }, - user_memory: { + ["user_memory"]: { parameters: { - type: 'object', - properties: { - confidence: { - type: 'number', - description: 'Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)', - }, - correct_value: { - type: 'string', - description: "The correct value to replace the wrong one (for 'correct' operation)", - }, - key: { - type: 'string', - description: "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')", + "type": "object", + "properties": { + "confidence": { + "type": "number", + "description": "Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)" }, - limit: { - type: 'number', - description: 'Number of results for search (default 10)', + "correct_value": { + "type": "string", + "description": "The correct value to replace the wrong one (for 'correct' operation)" }, - memory_type: { - type: 'string', - description: "Type of memory: 'preference', 
'entity', 'history', or 'correction'", - enum: ['preference', 'entity', 'history', 'correction'], - }, - operation: { - type: 'string', - description: "Operation: 'add', 'search', 'delete', 'correct', or 'list'", - enum: ['add', 'search', 'delete', 'correct', 'list'], - }, - query: { - type: 'string', - description: 'Search query to find relevant memories', - }, - source: { - type: 'string', - description: "Source: 'explicit' (user told you) or 'inferred' (you observed)", - enum: ['explicit', 'inferred'], - }, - value: { - type: 'string', - description: 'Value to remember', - }, - }, - required: ['operation'], - }, - resultSchema: undefined, - }, - user_table: { - parameters: { - type: 'object', - properties: { - args: { - type: 'object', - description: 'Arguments for the operation', - properties: { - column: { - type: 'object', - description: 'Column definition for add_column: { name, type, unique?, position? }', - }, - columnName: { - type: 'string', - description: - 'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)', - }, - columnNames: { - type: 'array', - description: - 'Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns.', - }, - data: { - type: 'object', - description: 'Row data as key-value pairs (required for insert_row, update_row)', - }, - description: { - type: 'string', - description: "Table description (optional for 'create')", - }, - fileId: { - type: 'string', - description: - 'Canonical workspace file ID for create_from_file/import_file. Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', - }, - filePath: { - type: 'string', - description: - 'Legacy workspace file reference for create_from_file/import_file. 
Prefer fileId.', - }, - filter: { - type: 'object', - description: - 'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter', - }, - limit: { - type: 'number', - description: 'Maximum rows to return or affect (optional, default 100)', - }, - name: { - type: 'string', - description: "Table name (required for 'create')", - }, - newName: { - type: 'string', - description: 'New column name (required for rename_column)', - }, - newType: { - type: 'string', - description: - 'New column type (optional for update_column). Types: string, number, boolean, date, json', - }, - offset: { - type: 'number', - description: 'Number of rows to skip (optional for query_rows, default 0)', - }, - outputFormat: { - type: 'string', - description: - 'Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.', - enum: ['json', 'csv', 'txt', 'md', 'html'], - }, - outputPath: { - type: 'string', - description: - 'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like "files/export.csv" — nested paths are not supported.', - }, - rowId: { - type: 'string', - description: 'Row ID (required for get_row, update_row, delete_row)', - }, - rowIds: { - type: 'array', - description: 'Array of row IDs to delete (for batch_delete_rows)', - }, - rows: { - type: 'array', - description: 'Array of row data objects (required for batch_insert_rows)', - }, - schema: { - type: 'object', - description: - "Table schema with columns array (required for 'create'). Each column: { name, type, unique? 
}", - }, - sort: { - type: 'object', - description: - "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)", - }, - tableId: { - type: 'string', - description: - "Table ID (required for most operations except 'create' and batch 'delete')", - }, - tableIds: { - type: 'array', - description: 'Array of table IDs (for batch delete)', - items: { - type: 'string', - }, - }, - unique: { - type: 'boolean', - description: 'Set column unique constraint (optional for update_column)', - }, - updates: { - type: 'array', - description: - 'Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)', - }, - values: { - type: 'object', - description: - 'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)', - }, - }, + "key": { + "type": "string", + "description": "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')" }, - operation: { - type: 'string', - description: 'The operation to perform', - enum: [ - 'create', - 'create_from_file', - 'import_file', - 'get', - 'get_schema', - 'delete', - 'insert_row', - 'batch_insert_rows', - 'get_row', - 'query_rows', - 'update_row', - 'delete_row', - 'update_rows_by_filter', - 'delete_rows_by_filter', - 'batch_update_rows', - 'batch_delete_rows', - 'add_column', - 'rename_column', - 'delete_column', - 'update_column', - ], + "limit": { + "type": "number", + "description": "Number of results for search (default 10)" }, - }, - required: ['operation', 'args'], + "memory_type": { + "type": "string", + "description": "Type of memory: 'preference', 'entity', 'history', or 'correction'", + "enum": [ + "preference", + "entity", + "history", + "correction" + ] + }, + "operation": { + "type": "string", + "description": "Operation: 'add', 'search', 'delete', 'correct', or 'list'", + "enum": [ + "add", + "search", + "delete", + "correct", + "list" + ] + }, + "query": { + "type": "string", + "description": "Search 
query to find relevant memories" + }, + "source": { + "type": "string", + "description": "Source: 'explicit' (user told you) or 'inferred' (you observed)", + "enum": [ + "explicit", + "inferred" + ] + }, + "value": { + "type": "string", + "description": "Value to remember" + } + }, + "required": [ + "operation" + ] + }, + resultSchema: undefined, + }, + ["user_table"]: { + parameters: { + "type": "object", + "properties": { + "args": { + "type": "object", + "description": "Arguments for the operation", + "properties": { + "column": { + "type": "object", + "description": "Column definition for add_column: { name, type, unique?, position? }" + }, + "columnName": { + "type": "string", + "description": "Column name (required for rename_column, update_column; use columnNames array for batch delete_column)" + }, + "columnNames": { + "type": "array", + "description": "Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns." + }, + "data": { + "type": "object", + "description": "Row data as key-value pairs (required for insert_row, update_row)" + }, + "description": { + "type": "string", + "description": "Table description (optional for 'create')" + }, + "fileId": { + "type": "string", + "description": "Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")." + }, + "filePath": { + "type": "string", + "description": "Legacy workspace file reference for create_from_file/import_file. Prefer fileId." 
+ }, + "filter": { + "type": "object", + "description": "MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter" + }, + "limit": { + "type": "number", + "description": "Maximum rows to return or affect (optional, default 100)" + }, + "name": { + "type": "string", + "description": "Table name (required for 'create')" + }, + "newName": { + "type": "string", + "description": "New column name (required for rename_column)" + }, + "newType": { + "type": "string", + "description": "New column type (optional for update_column). Types: string, number, boolean, date, json" + }, + "offset": { + "type": "number", + "description": "Number of rows to skip (optional for query_rows, default 0)" + }, + "outputFormat": { + "type": "string", + "description": "Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.", + "enum": [ + "json", + "csv", + "txt", + "md", + "html" + ] + }, + "outputPath": { + "type": "string", + "description": "Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported." + }, + "rowId": { + "type": "string", + "description": "Row ID (required for get_row, update_row, delete_row)" + }, + "rowIds": { + "type": "array", + "description": "Array of row IDs to delete (for batch_delete_rows)" + }, + "rows": { + "type": "array", + "description": "Array of row data objects (required for batch_insert_rows)" + }, + "schema": { + "type": "object", + "description": "Table schema with columns array (required for 'create'). Each column: { name, type, unique? 
}" + }, + "sort": { + "type": "object", + "description": "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)" + }, + "tableId": { + "type": "string", + "description": "Table ID (required for most operations except 'create' and batch 'delete')" + }, + "tableIds": { + "type": "array", + "description": "Array of table IDs (for batch delete)", + "items": { + "type": "string" + } + }, + "unique": { + "type": "boolean", + "description": "Set column unique constraint (optional for update_column)" + }, + "updates": { + "type": "array", + "description": "Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)" + }, + "values": { + "type": "object", + "description": "Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)" + } + } + }, + "operation": { + "type": "string", + "description": "The operation to perform", + "enum": [ + "create", + "create_from_file", + "import_file", + "get", + "get_schema", + "delete", + "insert_row", + "batch_insert_rows", + "get_row", + "query_rows", + "update_row", + "delete_row", + "update_rows_by_filter", + "delete_rows_by_filter", + "batch_update_rows", + "batch_delete_rows", + "add_column", + "rename_column", + "delete_column", + "update_column" + ] + } + }, + "required": [ + "operation", + "args" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: 'Operation-specific result payload.', - }, - message: { - type: 'string', - description: 'Human-readable outcome summary.', - }, - success: { - type: 'boolean', - description: 'Whether the operation succeeded.', - }, - }, - required: ['success', 'message'], - }, - }, - workflow: { - parameters: { - type: 'object', - }, - resultSchema: undefined, - }, - workspace_file: { - parameters: { - type: 'object', - properties: { - operation: { - type: 'string', - description: 'The file operation to perform.', - enum: ['append', 
'update', 'patch'], - }, - target: { - type: 'object', - description: 'Explicit file target. Use kind=file_id + fileId for existing files.', - properties: { - fileId: { - type: 'string', - description: - 'Canonical existing workspace file ID. Required when target.kind=file_id.', - }, - fileName: { - type: 'string', - description: - 'Plain workspace filename including extension, e.g. "main.py" or "report.docx". Required when target.kind=new_file.', - }, - kind: { - type: 'string', - description: 'How the file target is identified.', - enum: ['new_file', 'file_id'], - }, - }, - required: ['kind'], - }, - title: { - type: 'string', - description: - 'Required short UI label for this content unit, e.g. "Chapter 1", "Slide 3", or "Fix footer spacing".', - }, - contentType: { - type: 'string', - description: - 'Optional MIME type override. Usually omit and let the system infer from the target file extension.', - enum: [ - 'text/markdown', - 'text/html', - 'text/plain', - 'application/json', - 'text/csv', - 'application/vnd.openxmlformats-officedocument.presentationml.presentation', - 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', - 'application/pdf', - ], - }, - edit: { - type: 'object', - description: - 'Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.', - properties: { - after_anchor: { - type: 'string', - description: - 'Boundary line kept after inserted replacement content. Required for mode=replace_between.', - }, - anchor: { - type: 'string', - description: - 'Anchor line after which new content is inserted. Required for mode=insert_after.', - }, - before_anchor: { - type: 'string', - description: - 'Boundary line kept before inserted replacement content. 
Required for mode=replace_between.', - }, - end_anchor: { - type: 'string', - description: 'First line to keep after deletion. Required for mode=delete_between.', - }, - mode: { - type: 'string', - description: 'Anchored edit mode when strategy=anchored.', - enum: ['replace_between', 'insert_after', 'delete_between'], - }, - occurrence: { - type: 'number', - description: '1-based occurrence for repeated anchor lines. Optional; defaults to 1.', - }, - replaceAll: { - type: 'boolean', - description: - 'When true and strategy=search_replace, replace every match instead of requiring a unique single match.', - }, - search: { - type: 'string', - description: - 'Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true.', - }, - start_anchor: { - type: 'string', - description: 'First line to delete. Required for mode=delete_between.', - }, - strategy: { - type: 'string', - description: 'Patch strategy.', - enum: ['search_replace', 'anchored'], - }, + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Operation-specific result payload." + }, + "message": { + "type": "string", + "description": "Human-readable outcome summary." + }, + "success": { + "type": "boolean", + "description": "Whether the operation succeeded." + } + }, + "required": [ + "success", + "message" + ] + }, + }, + ["workflow"]: { + parameters: { + "type": "object" + }, + resultSchema: undefined, + }, + ["workspace_file"]: { + parameters: { + "type": "object", + "properties": { + "operation": { + "type": "string", + "description": "The file operation to perform.", + "enum": [ + "append", + "update", + "patch" + ] + }, + "target": { + "type": "object", + "description": "Explicit file target. Use kind=file_id + fileId for existing files.", + "properties": { + "fileId": { + "type": "string", + "description": "Canonical existing workspace file ID. Required when target.kind=file_id." 
+ }, + "fileName": { + "type": "string", + "description": "Plain workspace filename including extension, e.g. \"main.py\" or \"report.docx\". Required when target.kind=new_file." + }, + "kind": { + "type": "string", + "description": "How the file target is identified.", + "enum": [ + "new_file", + "file_id" + ] + } }, - }, - newName: { - type: 'string', - description: - 'New file name for rename. Must be a plain workspace filename like "main.py".', - }, - }, - required: ['operation', 'target', 'title'], + "required": [ + "kind" + ] + }, + "title": { + "type": "string", + "description": "Required short UI label for this content unit, e.g. \"Chapter 1\", \"Slide 3\", or \"Fix footer spacing\"." + }, + "contentType": { + "type": "string", + "description": "Optional MIME type override. Usually omit and let the system infer from the target file extension.", + "enum": [ + "text/markdown", + "text/html", + "text/plain", + "application/json", + "text/csv", + "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "application/pdf" + ] + }, + "edit": { + "type": "object", + "description": "Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.", + "properties": { + "after_anchor": { + "type": "string", + "description": "Boundary line kept after inserted replacement content. Required for mode=replace_between." + }, + "anchor": { + "type": "string", + "description": "Anchor line after which new content is inserted. Required for mode=insert_after." + }, + "before_anchor": { + "type": "string", + "description": "Boundary line kept before inserted replacement content. Required for mode=replace_between." + }, + "end_anchor": { + "type": "string", + "description": "First line to keep after deletion. 
Required for mode=delete_between." + }, + "mode": { + "type": "string", + "description": "Anchored edit mode when strategy=anchored.", + "enum": [ + "replace_between", + "insert_after", + "delete_between" + ] + }, + "occurrence": { + "type": "number", + "description": "1-based occurrence for repeated anchor lines. Optional; defaults to 1." + }, + "replaceAll": { + "type": "boolean", + "description": "When true and strategy=search_replace, replace every match instead of requiring a unique single match." + }, + "search": { + "type": "string", + "description": "Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true." + }, + "start_anchor": { + "type": "string", + "description": "First line to delete. Required for mode=delete_between." + }, + "strategy": { + "type": "string", + "description": "Patch strategy.", + "enum": [ + "search_replace", + "anchored" + ] + } + } + }, + "newName": { + "type": "string", + "description": "New file name for rename. Must be a plain workspace filename like \"main.py\"." + } + }, + "required": [ + "operation", + "target", + "title" + ] }, resultSchema: { - type: 'object', - properties: { - data: { - type: 'object', - description: - 'Optional operation metadata such as file id, file name, size, and content type.', - }, - message: { - type: 'string', - description: 'Human-readable summary of the outcome.', - }, - success: { - type: 'boolean', - description: 'Whether the file operation succeeded.', - }, - }, - required: ['success', 'message'], + "type": "object", + "properties": { + "data": { + "type": "object", + "description": "Optional operation metadata such as file id, file name, size, and content type." + }, + "message": { + "type": "string", + "description": "Human-readable summary of the outcome." + }, + "success": { + "type": "boolean", + "description": "Whether the file operation succeeded." 
+ } + }, + "required": [ + "success", + "message" + ] }, }, } diff --git a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts new file mode 100644 index 00000000000..19a1f6b0148 --- /dev/null +++ b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts @@ -0,0 +1,350 @@ +// AUTO-GENERATED FILE. DO NOT EDIT. +// +// Source: copilot/copilot/contracts/trace-attribute-values-v1.schema.json +// Regenerate with: bun run trace-attribute-values-contract:generate +// +// Canonical closed-set value vocabularies for mothership OTel +// attributes. Call sites should reference e.g. +// `CopilotRequestCancelReason.ExplicitStop` rather than the raw +// string literal, so typos become compile errors and the Go contract +// remains the single source of truth. + +export const AbortBackend = { + InProcess: "in_process", + Redis: "redis", +} as const; + +export type AbortBackendKey = keyof typeof AbortBackend; +export type AbortBackendValue = (typeof AbortBackend)[AbortBackendKey]; + +export const AbortRedisResult = { + Error: "error", + Ok: "ok", + Slow: "slow", +} as const; + +export type AbortRedisResultKey = keyof typeof AbortRedisResult; +export type AbortRedisResultValue = (typeof AbortRedisResult)[AbortRedisResultKey]; + +export const AuthKeyMatch = { + Enterprise: "enterprise", + None: "none", + User: "user", +} as const; + +export type AuthKeyMatchKey = keyof typeof AuthKeyMatch; +export type AuthKeyMatchValue = (typeof AuthKeyMatch)[AuthKeyMatchKey]; + +export const BillingAnalyticsOutcome = { + Duplicate: "duplicate", + RetriesExhausted: "retries_exhausted", + Success: "success", + Unknown: "unknown", +} as const; + +export type BillingAnalyticsOutcomeKey = keyof typeof BillingAnalyticsOutcome; +export type BillingAnalyticsOutcomeValue = (typeof BillingAnalyticsOutcome)[BillingAnalyticsOutcomeKey]; + +export const BillingFlushOutcome = { + CheckpointAlreadyClaimed: "checkpoint_already_claimed", + 
CheckpointLoadFailed: "checkpoint_load_failed", + Flushed: "flushed", + NoCheckpoint: "no_checkpoint", + NoSnapshot: "no_snapshot", + SkippedUnconfigured: "skipped_unconfigured", +} as const; + +export type BillingFlushOutcomeKey = keyof typeof BillingFlushOutcome; +export type BillingFlushOutcomeValue = (typeof BillingFlushOutcome)[BillingFlushOutcomeKey]; + +export const BillingRouteOutcome = { + AuthFailed: "auth_failed", + Billed: "billed", + BillingDisabled: "billing_disabled", + DuplicateIdempotencyKey: "duplicate_idempotency_key", + InternalError: "internal_error", + InvalidBody: "invalid_body", +} as const; + +export type BillingRouteOutcomeKey = keyof typeof BillingRouteOutcome; +export type BillingRouteOutcomeValue = (typeof BillingRouteOutcome)[BillingRouteOutcomeKey]; + +export const CopilotAbortOutcome = { + BadRequest: "bad_request", + FallbackPersistFailed: "fallback_persist_failed", + MissingMessageId: "missing_message_id", + MissingStreamId: "missing_stream_id", + NoChatId: "no_chat_id", + Ok: "ok", + SettleTimeout: "settle_timeout", + Settled: "settled", + Unauthorized: "unauthorized", +} as const; + +export type CopilotAbortOutcomeKey = keyof typeof CopilotAbortOutcome; +export type CopilotAbortOutcomeValue = (typeof CopilotAbortOutcome)[CopilotAbortOutcomeKey]; + +export const CopilotBranchKind = { + Workflow: "workflow", + Workspace: "workspace", +} as const; + +export type CopilotBranchKindKey = keyof typeof CopilotBranchKind; +export type CopilotBranchKindValue = (typeof CopilotBranchKind)[CopilotBranchKindKey]; + +export const CopilotChatFinalizeOutcome = { + AppendedAssistant: "appended_assistant", + AssistantAlreadyPersisted: "assistant_already_persisted", + ClearedStreamMarkerOnly: "cleared_stream_marker_only", + StaleUserMessage: "stale_user_message", +} as const; + +export type CopilotChatFinalizeOutcomeKey = keyof typeof CopilotChatFinalizeOutcome; +export type CopilotChatFinalizeOutcomeValue = (typeof 
CopilotChatFinalizeOutcome)[CopilotChatFinalizeOutcomeKey]; + +export const CopilotChatPersistOutcome = { + Appended: "appended", + ChatNotFound: "chat_not_found", +} as const; + +export type CopilotChatPersistOutcomeKey = keyof typeof CopilotChatPersistOutcome; +export type CopilotChatPersistOutcomeValue = (typeof CopilotChatPersistOutcome)[CopilotChatPersistOutcomeKey]; + +export const CopilotConfirmOutcome = { + Delivered: "delivered", + Forbidden: "forbidden", + InternalError: "internal_error", + RunNotFound: "run_not_found", + ToolCallNotFound: "tool_call_not_found", + Unauthorized: "unauthorized", + UpdateFailed: "update_failed", + ValidationError: "validation_error", +} as const; + +export type CopilotConfirmOutcomeKey = keyof typeof CopilotConfirmOutcome; +export type CopilotConfirmOutcomeValue = (typeof CopilotConfirmOutcome)[CopilotConfirmOutcomeKey]; + +export const CopilotFinalizeOutcome = { + Aborted: "aborted", + Error: "error", + Success: "success", +} as const; + +export type CopilotFinalizeOutcomeKey = keyof typeof CopilotFinalizeOutcome; +export type CopilotFinalizeOutcomeValue = (typeof CopilotFinalizeOutcome)[CopilotFinalizeOutcomeKey]; + +export const CopilotLeg = { + SimToGo: "sim_to_go", +} as const; + +export type CopilotLegKey = keyof typeof CopilotLeg; +export type CopilotLegValue = (typeof CopilotLeg)[CopilotLegKey]; + +export const CopilotOutputFileOutcome = { + Failed: "failed", + Uploaded: "uploaded", +} as const; + +export type CopilotOutputFileOutcomeKey = keyof typeof CopilotOutputFileOutcome; +export type CopilotOutputFileOutcomeValue = (typeof CopilotOutputFileOutcome)[CopilotOutputFileOutcomeKey]; + +export const CopilotRecoveryOutcome = { + GapDetected: "gap_detected", + InRange: "in_range", +} as const; + +export type CopilotRecoveryOutcomeKey = keyof typeof CopilotRecoveryOutcome; +export type CopilotRecoveryOutcomeValue = (typeof CopilotRecoveryOutcome)[CopilotRecoveryOutcomeKey]; + +export const CopilotRequestCancelReason = 
{ + ClientDisconnect: "client_disconnect", + ExplicitStop: "explicit_stop", + Timeout: "timeout", + Unknown: "unknown", +} as const; + +export type CopilotRequestCancelReasonKey = keyof typeof CopilotRequestCancelReason; +export type CopilotRequestCancelReasonValue = (typeof CopilotRequestCancelReason)[CopilotRequestCancelReasonKey]; + +export const CopilotResourcesOp = { + Delete: "delete", + None: "none", + Upsert: "upsert", +} as const; + +export type CopilotResourcesOpKey = keyof typeof CopilotResourcesOp; +export type CopilotResourcesOpValue = (typeof CopilotResourcesOp)[CopilotResourcesOpKey]; + +export const CopilotResumeOutcome = { + BatchDelivered: "batch_delivered", + ClientDisconnected: "client_disconnected", + EndedWithoutTerminal: "ended_without_terminal", + StreamNotFound: "stream_not_found", + TerminalDelivered: "terminal_delivered", +} as const; + +export type CopilotResumeOutcomeKey = keyof typeof CopilotResumeOutcome; +export type CopilotResumeOutcomeValue = (typeof CopilotResumeOutcome)[CopilotResumeOutcomeKey]; + +export const CopilotStopOutcome = { + ChatNotFound: "chat_not_found", + InternalError: "internal_error", + NoMatchingRow: "no_matching_row", + Persisted: "persisted", + Unauthorized: "unauthorized", + ValidationError: "validation_error", +} as const; + +export type CopilotStopOutcomeKey = keyof typeof CopilotStopOutcome; +export type CopilotStopOutcomeValue = (typeof CopilotStopOutcome)[CopilotStopOutcomeKey]; + +export const CopilotSurface = { + Copilot: "copilot", + Mothership: "mothership", +} as const; + +export type CopilotSurfaceKey = keyof typeof CopilotSurface; +export type CopilotSurfaceValue = (typeof CopilotSurface)[CopilotSurfaceKey]; + +export const CopilotTableOutcome = { + EmptyContent: "empty_content", + EmptyRows: "empty_rows", + Failed: "failed", + Imported: "imported", + InvalidJsonShape: "invalid_json_shape", + InvalidShape: "invalid_shape", + RowLimitExceeded: "row_limit_exceeded", + TableNotFound: 
"table_not_found", + Wrote: "wrote", +} as const; + +export type CopilotTableOutcomeKey = keyof typeof CopilotTableOutcome; +export type CopilotTableOutcomeValue = (typeof CopilotTableOutcome)[CopilotTableOutcomeKey]; + +export const CopilotTableSourceFormat = { + Csv: "csv", + Json: "json", +} as const; + +export type CopilotTableSourceFormatKey = keyof typeof CopilotTableSourceFormat; +export type CopilotTableSourceFormatValue = (typeof CopilotTableSourceFormat)[CopilotTableSourceFormatKey]; + +export const CopilotTransport = { + Batch: "batch", + Headless: "headless", + Stream: "stream", +} as const; + +export type CopilotTransportKey = keyof typeof CopilotTransport; +export type CopilotTransportValue = (typeof CopilotTransport)[CopilotTransportKey]; + +export const CopilotValidateOutcome = { + InternalAuthFailed: "internal_auth_failed", + InternalError: "internal_error", + InvalidBody: "invalid_body", + Ok: "ok", + UsageExceeded: "usage_exceeded", + UserNotFound: "user_not_found", +} as const; + +export type CopilotValidateOutcomeKey = keyof typeof CopilotValidateOutcome; +export type CopilotValidateOutcomeValue = (typeof CopilotValidateOutcome)[CopilotValidateOutcomeKey]; + +export const CopilotVfsOutcome = { + PassthroughFitsBudget: "passthrough_fits_budget", + PassthroughNoMetadata: "passthrough_no_metadata", + PassthroughNoSharp: "passthrough_no_sharp", + RejectedNoMetadata: "rejected_no_metadata", + RejectedNoSharp: "rejected_no_sharp", + RejectedTooLargeAfterResize: "rejected_too_large_after_resize", + Resized: "resized", +} as const; + +export type CopilotVfsOutcomeKey = keyof typeof CopilotVfsOutcome; +export type CopilotVfsOutcomeValue = (typeof CopilotVfsOutcome)[CopilotVfsOutcomeKey]; + +export const CopilotVfsReadOutcome = { + BinaryPlaceholder: "binary_placeholder", + DocumentParsed: "document_parsed", + ImagePrepared: "image_prepared", + ImageTooLarge: "image_too_large", + ParseFailed: "parse_failed", + ReadFailed: "read_failed", + TextRead: 
"text_read", + TextTooLarge: "text_too_large", +} as const; + +export type CopilotVfsReadOutcomeKey = keyof typeof CopilotVfsReadOutcome; +export type CopilotVfsReadOutcomeValue = (typeof CopilotVfsReadOutcome)[CopilotVfsReadOutcomeKey]; + +export const CopilotVfsReadPath = { + Binary: "binary", + Image: "image", + ParseableDocument: "parseable_document", + Text: "text", +} as const; + +export type CopilotVfsReadPathKey = keyof typeof CopilotVfsReadPath; +export type CopilotVfsReadPathValue = (typeof CopilotVfsReadPath)[CopilotVfsReadPathKey]; + +export const LlmErrorStage = { + BuildRequest: "build_request", + Decode: "decode", + HttpBuild: "http_build", + HttpStatus: "http_status", + Invoke: "invoke", + MarshalRequest: "marshal_request", +} as const; + +export type LlmErrorStageKey = keyof typeof LlmErrorStage; +export type LlmErrorStageValue = (typeof LlmErrorStage)[LlmErrorStageKey]; + +export const RateLimitOutcome = { + Allowed: "allowed", + IncrError: "incr_error", + Limited: "limited", +} as const; + +export type RateLimitOutcomeKey = keyof typeof RateLimitOutcome; +export type RateLimitOutcomeValue = (typeof RateLimitOutcome)[RateLimitOutcomeKey]; + +export const ToolAsyncWaiterResolution = { + ContextCancelled: "context_cancelled", + Poll: "poll", + Pubsub: "pubsub", + StoredAfterClose: "stored_after_close", + StoredBeforeSubscribe: "stored_before_subscribe", + StoredPostSubscribe: "stored_post_subscribe", + SubscriptionClosed: "subscription_closed", + Unknown: "unknown", +} as const; + +export type ToolAsyncWaiterResolutionKey = keyof typeof ToolAsyncWaiterResolution; +export type ToolAsyncWaiterResolutionValue = (typeof ToolAsyncWaiterResolution)[ToolAsyncWaiterResolutionKey]; + +export const ToolErrorKind = { + Dispatch: "dispatch", + NotFound: "not_found", +} as const; + +export type ToolErrorKindKey = keyof typeof ToolErrorKind; +export type ToolErrorKindValue = (typeof ToolErrorKind)[ToolErrorKindKey]; + +export const ToolExecutor = { + Client: 
"client", + Go: "go", + Sim: "sim", +} as const; + +export type ToolExecutorKey = keyof typeof ToolExecutor; +export type ToolExecutorValue = (typeof ToolExecutor)[ToolExecutorKey]; + +export const ToolStoreStatus = { + Cancelled: "cancelled", + Completed: "completed", + Failed: "failed", + Pending: "pending", +} as const; + +export type ToolStoreStatusKey = keyof typeof ToolStoreStatus; +export type ToolStoreStatusValue = (typeof ToolStoreStatus)[ToolStoreStatusKey]; diff --git a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts index 1f1e9e84a6a..499d4ddc76d 100644 --- a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts @@ -16,953 +16,963 @@ // by the upstream OTel spec, not by this contract. export const TraceAttr = { - AbortBackend: 'abort.backend', - AbortFound: 'abort.found', - AbortRedisResult: 'abort.redis_result', - AnalyticsAborted: 'analytics.aborted', - AnalyticsBilledTotalCost: 'analytics.billed_total_cost', - AnalyticsCacheReadTokens: 'analytics.cache_read_tokens', - AnalyticsCacheWriteTokens: 'analytics.cache_write_tokens', - AnalyticsCustomerType: 'analytics.customer_type', - AnalyticsDurationMs: 'analytics.duration_ms', - AnalyticsError: 'analytics.error', - AnalyticsInputTokens: 'analytics.input_tokens', - AnalyticsModel: 'analytics.model', - AnalyticsOutputTokens: 'analytics.output_tokens', - AnalyticsProvider: 'analytics.provider', - AnalyticsSource: 'analytics.source', - AnalyticsToolCallCount: 'analytics.tool_call_count', - ApiKeyId: 'api_key.id', - ApiKeyName: 'api_key.name', - AuthIncomingInternal: 'auth.incoming_internal', - AuthKeyMatch: 'auth.key.match', - AuthKeyPreview: 'auth.key.preview', - AuthKeySource: 'auth.key.source', - AuthKeyType: 'auth.key.type', - AuthProvider: 'auth.provider', - AuthValidateStatusCode: 'auth.validate.status_code', - AwsRegion: 'aws.region', - BedrockErrorCode: 'bedrock.error_code', - 
BedrockModelId: 'bedrock.model_id', - BedrockRequestBodyBytesRetry: 'bedrock.request.body_bytes_retry', - BillingAttempts: 'billing.attempts', - BillingChangeType: 'billing.change_type', - BillingCostInputUsd: 'billing.cost.input_usd', - BillingCostOutputUsd: 'billing.cost.output_usd', - BillingCostTotalUsd: 'billing.cost.total_usd', - BillingCostUsd: 'billing.cost_usd', - BillingCustomerType: 'billing.customer_type', - BillingDuplicate: 'billing.duplicate', - BillingDurationMs: 'billing.duration_ms', - BillingHasIdempotencyKey: 'billing.has_idempotency_key', - BillingIdempotencyKey: 'billing.idempotency_key', - BillingInterval: 'billing.interval', - BillingIsMcp: 'billing.is_mcp', - BillingLlmCost: 'billing.llm_cost', - BillingNewPlan: 'billing.new_plan', - BillingOutcome: 'billing.outcome', - BillingPlan: 'billing.plan', - BillingPreviousPlan: 'billing.previous_plan', - BillingServiceCharges: 'billing.service_charges', - BillingSource: 'billing.source', - BillingTotalCost: 'billing.total_cost', - BillingUsageCurrent: 'billing.usage.current', - BillingUsageExceeded: 'billing.usage.exceeded', - BillingUsageLimit: 'billing.usage.limit', - BlockId: 'block.id', - BlockName: 'block.name', - BlockType: 'block.type', - ChatActiveMessagesBytes: 'chat.active_messages_bytes', - ChatActiveMessagesCount: 'chat.active_messages_count', - ChatAppendBytes: 'chat.append_bytes', - ChatAppendCount: 'chat.append_count', - ChatArtifactKeys: 'chat.artifact_keys', - ChatArtifactsBytes: 'chat.artifacts_bytes', - ChatAuthType: 'chat.auth_type', - ChatContextCount: 'chat.context_count', - ChatContextUsage: 'chat.context_usage', - ChatContinuationMessagesBefore: 'chat.continuation.messages_before', - ChatContinuationToolResultBytes: 'chat.continuation.tool_result_bytes', - ChatContinuationToolResultFailure: 'chat.continuation.tool_result_failure', - ChatContinuationToolResultSuccess: 'chat.continuation.tool_result_success', - ChatContinuationToolResults: 'chat.continuation.tool_results', - 
ChatContinuationTotalToolCalls: 'chat.continuation.total_tool_calls', - ChatExistingMessageCount: 'chat.existing_message_count', - ChatFileAttachmentCount: 'chat.file_attachment_count', - ChatFinalizeOutcome: 'chat.finalize.outcome', - ChatFound: 'chat.found', - ChatHasAssistantMessage: 'chat.has_assistant_message', - ChatHasOutputConfigs: 'chat.has_output_configs', - ChatId: 'chat.id', - ChatMessageBytes: 'chat.message_bytes', - ChatMessagesAfter: 'chat.messages_after', - ChatMessagesBytes: 'chat.messages_bytes', - ChatMessagesCount: 'chat.messages_count', - ChatPersistOutcome: 'chat.persist.outcome', - ChatPreexisting: 'chat.preexisting', - ChatRollbackIndex: 'chat.rollback_index', - ChatTokensUsed: 'chat.tokens_used', - ChatType: 'chat.type', - ChatUserMessageId: 'chat.user_message_id', - CheckpointAge: 'checkpoint.age', - CheckpointAttemptsBytes: 'checkpoint.attempts_bytes', - CheckpointBytesAssistantToolUse: 'checkpoint.bytes.assistant_tool_use', - CheckpointBytesCurrentMessages: 'checkpoint.bytes.current_messages', - CheckpointBytesImmediateResults: 'checkpoint.bytes.immediate_results', - CheckpointBytesPendingToolCalls: 'checkpoint.bytes.pending_tool_calls', - CheckpointBytesProviderRequest: 'checkpoint.bytes.provider_request', - CheckpointBytesRequestContext: 'checkpoint.bytes.request_context', - CheckpointBytesToolUsage: 'checkpoint.bytes.tool_usage', - CheckpointCachedCredentialsBytes: 'checkpoint.cached_credentials_bytes', - CheckpointClaimed: 'checkpoint.claimed', - CheckpointClaimedNow: 'checkpoint.claimed_now', - CheckpointCompletedBytes: 'checkpoint.completed_bytes', - CheckpointCompletedSteps: 'checkpoint.completed_steps', - CheckpointCurrentMessages: 'checkpoint.current_messages', - CheckpointDecisionsBytes: 'checkpoint.decisions_bytes', - CheckpointFound: 'checkpoint.found', - CheckpointFrames: 'checkpoint.frames', - CheckpointId: 'checkpoint.id', - CheckpointImmediateResults: 'checkpoint.immediate_results', - CheckpointMessageId: 
'checkpoint.message_id', - CheckpointPendingBytes: 'checkpoint.pending_bytes', - CheckpointPendingSteps: 'checkpoint.pending_steps', - CheckpointPendingToolCount: 'checkpoint.pending_tool_count', - CheckpointRows: 'checkpoint.rows', - CheckpointTaskId: 'checkpoint.task_id', - CheckpointTotalToolCalls: 'checkpoint.total_tool_calls', - CheckpointWorkflowSnapshotBytes: 'checkpoint.workflow_snapshot_bytes', - ClientVersion: 'client.version', - ConditionId: 'condition.id', - ConditionName: 'condition.name', - ConditionResult: 'condition.result', - ContextReduceBudgetChars: 'context.reduce.budget_chars', - ContextReduceCaller: 'context.reduce.caller', - ContextReduceDidReduce: 'context.reduce.did_reduce', - ContextReduceInputChars: 'context.reduce.input_chars', - ContextReduceInputMessages: 'context.reduce.input_messages', - ContextReduceOutcome: 'context.reduce.outcome', - ContextReduceOutputChars: 'context.reduce.output_chars', - ContextReduceOutputMessages: 'context.reduce.output_messages', - ContextReduced: 'context.reduced', - ContextSummarizeInputChars: 'context.summarize.input_chars', - ContextSummarizeOutputChars: 'context.summarize.output_chars', - CopilotAbortGoMarkerOk: 'copilot.abort.go_marker_ok', - CopilotAbortLocalAborted: 'copilot.abort.local_aborted', - CopilotAbortOutcome: 'copilot.abort.outcome', - CopilotAsyncToolClaimedBy: 'copilot.async_tool.claimed_by', - CopilotAsyncToolHasError: 'copilot.async_tool.has_error', - CopilotAsyncToolIdsCount: 'copilot.async_tool.ids_count', - CopilotAsyncToolStatus: 'copilot.async_tool.status', - CopilotAsyncToolWorkerId: 'copilot.async_tool.worker_id', - CopilotBranchKind: 'copilot.branch.kind', - CopilotChatIsNew: 'copilot.chat.is_new', - CopilotCommandsCount: 'copilot.commands.count', - CopilotConfirmOutcome: 'copilot.confirm.outcome', - CopilotContextsCount: 'copilot.contexts.count', - CopilotFileAttachmentsCount: 'copilot.file_attachments.count', - CopilotFinalizeOutcome: 'copilot.finalize.outcome', - 
CopilotInterruptedPriorStream: 'copilot.interrupted_prior_stream', - CopilotLeg: 'copilot.leg', - CopilotMode: 'copilot.mode', - CopilotOperation: 'copilot.operation', - CopilotOutputFileBytes: 'copilot.output_file.bytes', - CopilotOutputFileFormat: 'copilot.output_file.format', - CopilotOutputFileId: 'copilot.output_file.id', - CopilotOutputFileName: 'copilot.output_file.name', - CopilotOutputFileOutcome: 'copilot.output_file.outcome', - CopilotPendingStreamWaitMs: 'copilot.pending_stream.wait_ms', - CopilotPrefetch: 'copilot.prefetch', - CopilotPublisherClientDisconnected: 'copilot.publisher.client_disconnected', - CopilotPublisherSawComplete: 'copilot.publisher.saw_complete', - CopilotRecoveryLatestSeq: 'copilot.recovery.latest_seq', - CopilotRecoveryOldestSeq: 'copilot.recovery.oldest_seq', - CopilotRecoveryOutcome: 'copilot.recovery.outcome', - CopilotRecoveryRequestedAfterSeq: 'copilot.recovery.requested_after_seq', - CopilotRequestOutcome: 'copilot.request.outcome', - CopilotResourceAttachmentsCount: 'copilot.resource_attachments.count', - CopilotResourcesAborted: 'copilot.resources.aborted', - CopilotResourcesOp: 'copilot.resources.op', - CopilotResourcesRemovedCount: 'copilot.resources.removed_count', - CopilotResourcesUpsertedCount: 'copilot.resources.upserted_count', - CopilotResultContentBlocks: 'copilot.result.content_blocks', - CopilotResultContentLength: 'copilot.result.content_length', - CopilotResultToolCalls: 'copilot.result.tool_calls', - CopilotResumeAfterCursor: 'copilot.resume.after_cursor', - CopilotResumeDurationMs: 'copilot.resume.duration_ms', - CopilotResumeEventCount: 'copilot.resume.event_count', - CopilotResumeOutcome: 'copilot.resume.outcome', - CopilotResumePollIterations: 'copilot.resume.poll_iterations', - CopilotResumePreviewSessionCount: 'copilot.resume.preview_session_count', - CopilotRoute: 'copilot.route', - CopilotRunAgent: 'copilot.run.agent', - CopilotRunHasCompletedAt: 'copilot.run.has_completed_at', - CopilotRunHasError: 
'copilot.run.has_error', - CopilotRunModel: 'copilot.run.model', - CopilotRunParentId: 'copilot.run.parent_id', - CopilotRunProvider: 'copilot.run.provider', - CopilotRunStatus: 'copilot.run.status', - CopilotStopAppendedAssistant: 'copilot.stop.appended_assistant', - CopilotStopBlocksCount: 'copilot.stop.blocks_count', - CopilotStopContentLength: 'copilot.stop.content_length', - CopilotStopOutcome: 'copilot.stop.outcome', - CopilotStream: 'copilot.stream', - CopilotSurface: 'copilot.surface', - CopilotTableId: 'copilot.table.id', - CopilotTableOutcome: 'copilot.table.outcome', - CopilotTableRowCount: 'copilot.table.row_count', - CopilotTableSourceContentBytes: 'copilot.table.source.content_bytes', - CopilotTableSourceFormat: 'copilot.table.source.format', - CopilotTableSourcePath: 'copilot.table.source.path', - CopilotTraceSpanCount: 'copilot.trace.span_count', - CopilotTransport: 'copilot.transport', - CopilotUserMessagePreview: 'copilot.user.message_preview', - CopilotValidateOutcome: 'copilot.validate.outcome', - CopilotVfsFileExtension: 'copilot.vfs.file.extension', - CopilotVfsFileMediaType: 'copilot.vfs.file.media_type', - CopilotVfsFileName: 'copilot.vfs.file.name', - CopilotVfsFileSizeBytes: 'copilot.vfs.file.size_bytes', - CopilotVfsHasAlpha: 'copilot.vfs.has_alpha', - CopilotVfsInputBytes: 'copilot.vfs.input.bytes', - CopilotVfsInputHeight: 'copilot.vfs.input.height', - CopilotVfsInputMediaTypeClaimed: 'copilot.vfs.input.media_type_claimed', - CopilotVfsInputMediaTypeDetected: 'copilot.vfs.input.media_type_detected', - CopilotVfsInputWidth: 'copilot.vfs.input.width', - CopilotVfsMetadataFailed: 'copilot.vfs.metadata.failed', - CopilotVfsOutcome: 'copilot.vfs.outcome', - CopilotVfsOutputBytes: 'copilot.vfs.output.bytes', - CopilotVfsOutputMediaType: 'copilot.vfs.output.media_type', - CopilotVfsReadImageResized: 'copilot.vfs.read.image.resized', - CopilotVfsReadOutcome: 'copilot.vfs.read.outcome', - CopilotVfsReadOutputBytes: 
'copilot.vfs.read.output.bytes', - CopilotVfsReadOutputLines: 'copilot.vfs.read.output.lines', - CopilotVfsReadOutputMediaType: 'copilot.vfs.read.output.media_type', - CopilotVfsReadPath: 'copilot.vfs.read.path', - CopilotVfsResizeAttempts: 'copilot.vfs.resize.attempts', - CopilotVfsResizeChosenDimension: 'copilot.vfs.resize.chosen_dimension', - CopilotVfsResizeChosenQuality: 'copilot.vfs.resize.chosen_quality', - CopilotVfsResized: 'copilot.vfs.resized', - CopilotVfsSharpLoadFailed: 'copilot.vfs.sharp.load_failed', - CostDefaultCost: 'cost.default_cost', - CredentialSetId: 'credential_set.id', - CredentialSetName: 'credential_set.name', - DbOperation: 'db.operation', - DbSqlTable: 'db.sql.table', - DbSystem: 'db.system', - DeploymentEnvironment: 'deployment.environment', - DeploymentVersion: 'deployment.version', - DocumentFileSize: 'document.file_size', - DocumentMimeType: 'document.mime_type', - DocumentsCount: 'documents.count', - DocumentsUploadType: 'documents.upload_type', - Error: 'error', - ErrorCode: 'error.code', - ErrorInternal: 'error.internal', - ErrorType: 'error.type', - EventName: 'event.name', - EventTimestamp: 'event.timestamp', - ExecutionBlocksExecuted: 'execution.blocks_executed', - ExecutionDurationMs: 'execution.duration_ms', - ExecutionErrorMessage: 'execution.error_message', - ExecutionHasErrors: 'execution.has_errors', - ExecutionStatus: 'execution.status', - ExecutionTotalCost: 'execution.total_cost', - ExecutionTrigger: 'execution.trigger', - FunctionExecutionTimeMs: 'function.execution_time_ms', - FunctionId: 'function.id', - FunctionName: 'function.name', - GenAiAgentId: 'gen_ai.agent.id', - GenAiAgentName: 'gen_ai.agent.name', - GenAiCostInput: 'gen_ai.cost.input', - GenAiCostOutput: 'gen_ai.cost.output', - GenAiCostTotal: 'gen_ai.cost.total', - GenAiInputMessages: 'gen_ai.input.messages', - GenAiOperationName: 'gen_ai.operation.name', - GenAiOutputMessages: 'gen_ai.output.messages', - GenAiRequestAssistantMessages: 
'gen_ai.request.assistant_messages', - GenAiRequestContentBlocks: 'gen_ai.request.content_blocks', - GenAiRequestHasCacheControl: 'gen_ai.request.has_cache_control', - GenAiRequestImageBlocks: 'gen_ai.request.image_blocks', - GenAiRequestImageDataBytes: 'gen_ai.request.image_data_bytes', - GenAiRequestMaxMessageBlocks: 'gen_ai.request.max_message_blocks', - GenAiRequestMessagesCount: 'gen_ai.request.messages.count', - GenAiRequestModel: 'gen_ai.request.model', - GenAiRequestSystemChars: 'gen_ai.request.system_chars', - GenAiRequestTextBlocks: 'gen_ai.request.text_blocks', - GenAiRequestToolResultBlocks: 'gen_ai.request.tool_result_blocks', - GenAiRequestToolUseBlocks: 'gen_ai.request.tool_use_blocks', - GenAiRequestToolsCount: 'gen_ai.request.tools.count', - GenAiRequestUserMessages: 'gen_ai.request.user_messages', - GenAiSystem: 'gen_ai.system', - GenAiToolName: 'gen_ai.tool.name', - GenAiUsageCacheCreationTokens: 'gen_ai.usage.cache_creation_tokens', - GenAiUsageCacheReadTokens: 'gen_ai.usage.cache_read_tokens', - GenAiUsageInputTokens: 'gen_ai.usage.input_tokens', - GenAiUsageOutputTokens: 'gen_ai.usage.output_tokens', - GenAiUsageTotalTokens: 'gen_ai.usage.total_tokens', - GenAiWorkflowExecutionId: 'gen_ai.workflow.execution_id', - GenAiWorkflowId: 'gen_ai.workflow.id', - GenAiWorkflowName: 'gen_ai.workflow.name', - HostedKeyEnvVar: 'hosted_key.env_var', - HttpHost: 'http.host', - HttpMethod: 'http.method', - HttpPath: 'http.path', - HttpRemoteAddr: 'http.remote_addr', - HttpRequestContentLength: 'http.request.content_length', - HttpResponseBodyBytes: 'http.response.body_bytes', - HttpResponseContentLength: 'http.response.content_length', - HttpResponseHeadersMs: 'http.response.headers_ms', - HttpResponseTotalMs: 'http.response.total_ms', - HttpServerDurationMs: 'http.server.duration_ms', - HttpStatusCode: 'http.status_code', - HttpTarget: 'http.target', - HttpUrl: 'http.url', - HttpUserAgent: 'http.user_agent', - InvitationRole: 'invitation.role', - 
KnowledgeBaseId: 'knowledge_base.id', - KnowledgeBaseName: 'knowledge_base.name', - LlmErrorStage: 'llm.error_stage', - LlmRequestBodyBytes: 'llm.request.body_bytes', - LlmStreamBytes: 'llm.stream.bytes', - LlmStreamChunks: 'llm.stream.chunks', - LlmStreamFirstChunkBytes: 'llm.stream.first_chunk_bytes', - LlmStreamFirstChunkMs: 'llm.stream.first_chunk_ms', - LlmStreamOpenMs: 'llm.stream.open_ms', - LlmStreamTotalMs: 'llm.stream.total_ms', - LockAcquired: 'lock.acquired', - LockBackend: 'lock.backend', - LockTimedOut: 'lock.timed_out', - LockTimeoutMs: 'lock.timeout_ms', - LoopId: 'loop.id', - LoopIterations: 'loop.iterations', - LoopName: 'loop.name', - McpExecutionStatus: 'mcp.execution_status', - McpServerId: 'mcp.server_id', - McpServerName: 'mcp.server_name', - McpToolName: 'mcp.tool_name', - McpTransport: 'mcp.transport', - MemberRole: 'member.role', - MemoryContentBytes: 'memory.content_bytes', - MemoryFound: 'memory.found', - MemoryPath: 'memory.path', - MemoryRowCount: 'memory.row_count', - MessageId: 'message.id', - MessagingDestinationName: 'messaging.destination.name', - MessagingSystem: 'messaging.system', - ModelDurationMs: 'model.duration_ms', - ModelId: 'model.id', - ModelName: 'model.name', - MothershipOrigin: 'mothership.origin', - NetPeerName: 'net.peer.name', - OauthProvider: 'oauth.provider', - ParallelBranches: 'parallel.branches', - ParallelId: 'parallel.id', - ParallelName: 'parallel.name', - PrefsToolCount: 'prefs.tool_count', - ProcessingChunkSize: 'processing.chunk_size', - ProcessingRecipe: 'processing.recipe', - ProviderId: 'provider.id', - RateLimitAttempt: 'rate_limit.attempt', - RateLimitCount: 'rate_limit.count', - RateLimitDelayMs: 'rate_limit.delay_ms', - RateLimitLimit: 'rate_limit.limit', - RateLimitMaxRetries: 'rate_limit.max_retries', - RateLimitOutcome: 'rate_limit.outcome', - RateLimitRetryAfterMs: 'rate_limit.retry_after_ms', - RequestGoTraceId: 'request.go_trace_id', - RequestId: 'request.id', - RequiredVersion: 
'required.version', - ResumeRequestBodyBytes: 'resume.request.body_bytes', - ResumeResultsCount: 'resume.results.count', - ResumeResultsDataBytes: 'resume.results.data_bytes', - ResumeResultsFailureCount: 'resume.results.failure_count', - ResumeResultsSuccessCount: 'resume.results.success_count', - RouterBackendName: 'router.backend_name', - RouterBedrockEnabled: 'router.bedrock_enabled', - RouterBedrockSupportedModel: 'router.bedrock_supported_model', - RouterId: 'router.id', - RouterName: 'router.name', - RouterSelectedBackend: 'router.selected_backend', - RouterSelectedPath: 'router.selected_path', - RunId: 'run.id', - SearchResultsCount: 'search.results_count', - ServiceInstanceId: 'service.instance.id', - ServiceName: 'service.name', - ServiceNamespace: 'service.namespace', - ServiceVersion: 'service.version', - SettleCompleted: 'settle.completed', - SettleTimeoutMs: 'settle.timeout_ms', - SettleWaitMs: 'settle.wait_ms', - SimOperation: 'sim.operation', - SimRequestId: 'sim.request_id', - SpanDurationMs: 'span.duration_ms', - SpanStatus: 'span.status', - SpanType: 'span.type', - StreamId: 'stream.id', - SubagentId: 'subagent.id', - SubagentOutcomeContentBytes: 'subagent.outcome.content_bytes', - SubagentOutcomeError: 'subagent.outcome.error', - SubagentOutcomeStructuredType: 'subagent.outcome.structured_type', - SubagentOutcomeSuccess: 'subagent.outcome.success', - SubagentOutcomeToolCallCount: 'subagent.outcome.tool_call_count', - TaskAge: 'task.age', - TaskDecisionCount: 'task.decision_count', - TaskErrorCount: 'task.error_count', - TaskFound: 'task.found', - TaskId: 'task.id', - TaskListLimit: 'task.list_limit', - TaskRows: 'task.rows', - TaskStatus: 'task.status', - TaskStepCount: 'task.step_count', - TelemetrySdkLanguage: 'telemetry.sdk.language', - TelemetrySdkName: 'telemetry.sdk.name', - TelemetrySdkVersion: 'telemetry.sdk.version', - TemplateId: 'template.id', - TemplateName: 'template.name', - ThrottleReason: 'throttle.reason', - ToolArgsBytes: 
'tool.args.bytes', - ToolArgsCount: 'tool.args.count', - ToolArgsPreview: 'tool.args.preview', - ToolAsyncWaiterPollCount: 'tool.async_waiter.poll_count', - ToolAsyncWaiterPubsubDeliveries: 'tool.async_waiter.pubsub_deliveries', - ToolAsyncWaiterResolution: 'tool.async_waiter.resolution', - ToolCallId: 'tool.call_id', - ToolClientExecutable: 'tool.client_executable', - ToolCompletionReceived: 'tool.completion.received', - ToolConfirmationStatus: 'tool.confirmation.status', - ToolDurationMs: 'tool.duration_ms', - ToolErrorKind: 'tool.error_kind', - ToolExecutor: 'tool.executor', - ToolExternalService: 'tool.external.service', - ToolId: 'tool.id', - ToolName: 'tool.name', - ToolOutcome: 'tool.outcome', - ToolOutcomeMessage: 'tool.outcome.message', - ToolParentSpan: 'tool.parent_span', - ToolPayloadBytes: 'tool.payload.bytes', - ToolResultArtifact: 'tool.result.artifact', - ToolResultBytes: 'tool.result.bytes', - ToolResultSuccess: 'tool.result.success', - ToolScheduled: 'tool.scheduled', - ToolStatus: 'tool.status', - ToolStatusCode: 'tool.status_code', - ToolStoreStatus: 'tool.store_status', - ToolSync: 'tool.sync', - ToolTimeoutMs: 'tool.timeout_ms', - TraceAborted: 'trace.aborted', - TraceBilledTotalCost: 'trace.billed_total_cost', - TraceCacheReadTokens: 'trace.cache_read_tokens', - TraceCacheWriteTokens: 'trace.cache_write_tokens', - TraceDurationMs: 'trace.duration_ms', - TraceError: 'trace.error', - TraceGoId: 'trace.go_id', - TraceInputTokens: 'trace.input_tokens', - TraceModel: 'trace.model', - TraceOutcome: 'trace.outcome', - TraceOutputTokens: 'trace.output_tokens', - TraceProvider: 'trace.provider', - TraceRawTotalCost: 'trace.raw_total_cost', - TraceSpanCount: 'trace.span_count', - TraceToolCallCount: 'trace.tool_call_count', - UserAuthMethod: 'user.auth_method', - UserAuthProvider: 'user.auth_provider', - UserId: 'user.id', - WebhookId: 'webhook.id', - WebhookProvider: 'webhook.provider', - WebhookTriggerSuccess: 'webhook.trigger_success', - 
WorkflowBlockTypes: 'workflow.block_types', - WorkflowBlocksCount: 'workflow.blocks_count', - WorkflowCreatedId: 'workflow.created_id', - WorkflowDurationMs: 'workflow.duration_ms', - WorkflowEdgesCount: 'workflow.edges_count', - WorkflowExecutionId: 'workflow.execution_id', - WorkflowHasFolder: 'workflow.has_folder', - WorkflowHasWorkspace: 'workflow.has_workspace', - WorkflowId: 'workflow.id', - WorkflowLoopsCount: 'workflow.loops_count', - WorkflowName: 'workflow.name', - WorkflowNewId: 'workflow.new_id', - WorkflowParallelsCount: 'workflow.parallels_count', - WorkflowSourceId: 'workflow.source_id', - WorkflowTrigger: 'workflow.trigger', - WorkspaceId: 'workspace.id', - WorkspaceName: 'workspace.name', -} as const + AbortBackend: "abort.backend", + AbortFound: "abort.found", + AbortRedisResult: "abort.redis_result", + AnalyticsAborted: "analytics.aborted", + AnalyticsBilledTotalCost: "analytics.billed_total_cost", + AnalyticsCacheReadTokens: "analytics.cache_read_tokens", + AnalyticsCacheWriteTokens: "analytics.cache_write_tokens", + AnalyticsCustomerType: "analytics.customer_type", + AnalyticsDurationMs: "analytics.duration_ms", + AnalyticsError: "analytics.error", + AnalyticsInputTokens: "analytics.input_tokens", + AnalyticsModel: "analytics.model", + AnalyticsOutputTokens: "analytics.output_tokens", + AnalyticsProvider: "analytics.provider", + AnalyticsSource: "analytics.source", + AnalyticsToolCallCount: "analytics.tool_call_count", + ApiKeyId: "api_key.id", + ApiKeyName: "api_key.name", + AuthIncomingInternal: "auth.incoming_internal", + AuthKeyMatch: "auth.key.match", + AuthKeyPreview: "auth.key.preview", + AuthKeySource: "auth.key.source", + AuthKeyType: "auth.key.type", + AuthProvider: "auth.provider", + AuthValidateStatusCode: "auth.validate.status_code", + AwsRegion: "aws.region", + BedrockErrorCode: "bedrock.error_code", + BedrockModelId: "bedrock.model_id", + BedrockRequestBodyBytesRetry: "bedrock.request.body_bytes_retry", + BillingAttempts: 
"billing.attempts", + BillingChangeType: "billing.change_type", + BillingCostInputUsd: "billing.cost.input_usd", + BillingCostOutputUsd: "billing.cost.output_usd", + BillingCostTotalUsd: "billing.cost.total_usd", + BillingCostUsd: "billing.cost_usd", + BillingCustomerType: "billing.customer_type", + BillingDuplicate: "billing.duplicate", + BillingDurationMs: "billing.duration_ms", + BillingHasIdempotencyKey: "billing.has_idempotency_key", + BillingIdempotencyKey: "billing.idempotency_key", + BillingInterval: "billing.interval", + BillingIsMcp: "billing.is_mcp", + BillingLlmCost: "billing.llm_cost", + BillingNewPlan: "billing.new_plan", + BillingOutcome: "billing.outcome", + BillingPlan: "billing.plan", + BillingPreviousPlan: "billing.previous_plan", + BillingServiceCharges: "billing.service_charges", + BillingSource: "billing.source", + BillingTotalCost: "billing.total_cost", + BillingUsageCurrent: "billing.usage.current", + BillingUsageExceeded: "billing.usage.exceeded", + BillingUsageLimit: "billing.usage.limit", + BlockId: "block.id", + BlockName: "block.name", + BlockType: "block.type", + ChatActiveMessagesBytes: "chat.active_messages_bytes", + ChatActiveMessagesCount: "chat.active_messages_count", + ChatAppendBytes: "chat.append_bytes", + ChatAppendCount: "chat.append_count", + ChatArtifactKeys: "chat.artifact_keys", + ChatArtifactsBytes: "chat.artifacts_bytes", + ChatAuthType: "chat.auth_type", + ChatContextCount: "chat.context_count", + ChatContextUsage: "chat.context_usage", + ChatContinuationMessagesBefore: "chat.continuation.messages_before", + ChatContinuationToolResultBytes: "chat.continuation.tool_result_bytes", + ChatContinuationToolResultFailure: "chat.continuation.tool_result_failure", + ChatContinuationToolResultSuccess: "chat.continuation.tool_result_success", + ChatContinuationToolResults: "chat.continuation.tool_results", + ChatContinuationTotalToolCalls: "chat.continuation.total_tool_calls", + ChatExistingMessageCount: 
"chat.existing_message_count", + ChatFileAttachmentCount: "chat.file_attachment_count", + ChatFinalizeOutcome: "chat.finalize.outcome", + ChatFound: "chat.found", + ChatHasAssistantMessage: "chat.has_assistant_message", + ChatHasOutputConfigs: "chat.has_output_configs", + ChatId: "chat.id", + ChatMessageBytes: "chat.message_bytes", + ChatMessagesAfter: "chat.messages_after", + ChatMessagesBytes: "chat.messages_bytes", + ChatMessagesCount: "chat.messages_count", + ChatPersistOutcome: "chat.persist.outcome", + ChatPreexisting: "chat.preexisting", + ChatRollbackIndex: "chat.rollback_index", + ChatTokensUsed: "chat.tokens_used", + ChatType: "chat.type", + ChatUserMessageId: "chat.user_message_id", + CheckpointAge: "checkpoint.age", + CheckpointAttemptsBytes: "checkpoint.attempts_bytes", + CheckpointBytesAssistantToolUse: "checkpoint.bytes.assistant_tool_use", + CheckpointBytesCurrentMessages: "checkpoint.bytes.current_messages", + CheckpointBytesImmediateResults: "checkpoint.bytes.immediate_results", + CheckpointBytesPendingToolCalls: "checkpoint.bytes.pending_tool_calls", + CheckpointBytesProviderRequest: "checkpoint.bytes.provider_request", + CheckpointBytesRequestContext: "checkpoint.bytes.request_context", + CheckpointBytesToolUsage: "checkpoint.bytes.tool_usage", + CheckpointCachedCredentialsBytes: "checkpoint.cached_credentials_bytes", + CheckpointClaimed: "checkpoint.claimed", + CheckpointClaimedNow: "checkpoint.claimed_now", + CheckpointCompletedBytes: "checkpoint.completed_bytes", + CheckpointCompletedSteps: "checkpoint.completed_steps", + CheckpointCurrentMessages: "checkpoint.current_messages", + CheckpointDecisionsBytes: "checkpoint.decisions_bytes", + CheckpointFound: "checkpoint.found", + CheckpointFrames: "checkpoint.frames", + CheckpointId: "checkpoint.id", + CheckpointImmediateResults: "checkpoint.immediate_results", + CheckpointMessageId: "checkpoint.message_id", + CheckpointPendingBytes: "checkpoint.pending_bytes", + CheckpointPendingSteps: 
"checkpoint.pending_steps", + CheckpointPendingToolCount: "checkpoint.pending_tool_count", + CheckpointRows: "checkpoint.rows", + CheckpointTaskId: "checkpoint.task_id", + CheckpointTotalToolCalls: "checkpoint.total_tool_calls", + CheckpointWorkflowSnapshotBytes: "checkpoint.workflow_snapshot_bytes", + ClientVersion: "client.version", + ConditionId: "condition.id", + ConditionName: "condition.name", + ConditionResult: "condition.result", + ContextReduceBudgetChars: "context.reduce.budget_chars", + ContextReduceCaller: "context.reduce.caller", + ContextReduceDidReduce: "context.reduce.did_reduce", + ContextReduceInputChars: "context.reduce.input_chars", + ContextReduceInputMessages: "context.reduce.input_messages", + ContextReduceOutcome: "context.reduce.outcome", + ContextReduceOutputChars: "context.reduce.output_chars", + ContextReduceOutputMessages: "context.reduce.output_messages", + ContextReduced: "context.reduced", + ContextSummarizeInputChars: "context.summarize.input_chars", + ContextSummarizeOutputChars: "context.summarize.output_chars", + CopilotAbortControllerFired: "copilot.abort.controller_fired", + CopilotAbortGoMarkerOk: "copilot.abort.go_marker_ok", + CopilotAbortLocalAborted: "copilot.abort.local_aborted", + CopilotAbortMarkerWritten: "copilot.abort.marker_written", + CopilotAbortOutcome: "copilot.abort.outcome", + CopilotAbortUnknownReason: "copilot.abort.unknown_reason", + CopilotAsyncToolClaimedBy: "copilot.async_tool.claimed_by", + CopilotAsyncToolHasError: "copilot.async_tool.has_error", + CopilotAsyncToolIdsCount: "copilot.async_tool.ids_count", + CopilotAsyncToolStatus: "copilot.async_tool.status", + CopilotAsyncToolWorkerId: "copilot.async_tool.worker_id", + CopilotBranchKind: "copilot.branch.kind", + CopilotChatIsNew: "copilot.chat.is_new", + CopilotCommandsCount: "copilot.commands.count", + CopilotConfirmOutcome: "copilot.confirm.outcome", + CopilotContextsCount: "copilot.contexts.count", + CopilotFileAttachmentsCount: 
"copilot.file_attachments.count", + CopilotFinalizeOutcome: "copilot.finalize.outcome", + CopilotInterruptedPriorStream: "copilot.interrupted_prior_stream", + CopilotLeg: "copilot.leg", + CopilotMode: "copilot.mode", + CopilotOperation: "copilot.operation", + CopilotOutputFileBytes: "copilot.output_file.bytes", + CopilotOutputFileFormat: "copilot.output_file.format", + CopilotOutputFileId: "copilot.output_file.id", + CopilotOutputFileName: "copilot.output_file.name", + CopilotOutputFileOutcome: "copilot.output_file.outcome", + CopilotPendingStreamWaitMs: "copilot.pending_stream.wait_ms", + CopilotPrefetch: "copilot.prefetch", + CopilotPublisherClientDisconnected: "copilot.publisher.client_disconnected", + CopilotPublisherSawComplete: "copilot.publisher.saw_complete", + CopilotRecoveryLatestSeq: "copilot.recovery.latest_seq", + CopilotRecoveryOldestSeq: "copilot.recovery.oldest_seq", + CopilotRecoveryOutcome: "copilot.recovery.outcome", + CopilotRecoveryRequestedAfterSeq: "copilot.recovery.requested_after_seq", + CopilotRequestCancelReason: "copilot.request.cancel_reason", + CopilotRequestOutcome: "copilot.request.outcome", + CopilotResourceAttachmentsCount: "copilot.resource_attachments.count", + CopilotResourcesAborted: "copilot.resources.aborted", + CopilotResourcesOp: "copilot.resources.op", + CopilotResourcesRemovedCount: "copilot.resources.removed_count", + CopilotResourcesUpsertedCount: "copilot.resources.upserted_count", + CopilotResultContentBlocks: "copilot.result.content_blocks", + CopilotResultContentLength: "copilot.result.content_length", + CopilotResultToolCalls: "copilot.result.tool_calls", + CopilotResumeAfterCursor: "copilot.resume.after_cursor", + CopilotResumeDurationMs: "copilot.resume.duration_ms", + CopilotResumeEventCount: "copilot.resume.event_count", + CopilotResumeOutcome: "copilot.resume.outcome", + CopilotResumePollIterations: "copilot.resume.poll_iterations", + CopilotResumePreviewSessionCount: "copilot.resume.preview_session_count", + 
CopilotRoute: "copilot.route", + CopilotRunAgent: "copilot.run.agent", + CopilotRunHasCompletedAt: "copilot.run.has_completed_at", + CopilotRunHasError: "copilot.run.has_error", + CopilotRunModel: "copilot.run.model", + CopilotRunParentId: "copilot.run.parent_id", + CopilotRunProvider: "copilot.run.provider", + CopilotRunStatus: "copilot.run.status", + CopilotStopAppendedAssistant: "copilot.stop.appended_assistant", + CopilotStopBlocksCount: "copilot.stop.blocks_count", + CopilotStopContentLength: "copilot.stop.content_length", + CopilotStopOutcome: "copilot.stop.outcome", + CopilotStream: "copilot.stream", + CopilotSurface: "copilot.surface", + CopilotTableId: "copilot.table.id", + CopilotTableOutcome: "copilot.table.outcome", + CopilotTableRowCount: "copilot.table.row_count", + CopilotTableSourceContentBytes: "copilot.table.source.content_bytes", + CopilotTableSourceFormat: "copilot.table.source.format", + CopilotTableSourcePath: "copilot.table.source.path", + CopilotTraceSpanCount: "copilot.trace.span_count", + CopilotTransport: "copilot.transport", + CopilotUserMessagePreview: "copilot.user.message_preview", + CopilotValidateOutcome: "copilot.validate.outcome", + CopilotVfsFileExtension: "copilot.vfs.file.extension", + CopilotVfsFileMediaType: "copilot.vfs.file.media_type", + CopilotVfsFileName: "copilot.vfs.file.name", + CopilotVfsFileSizeBytes: "copilot.vfs.file.size_bytes", + CopilotVfsHasAlpha: "copilot.vfs.has_alpha", + CopilotVfsInputBytes: "copilot.vfs.input.bytes", + CopilotVfsInputHeight: "copilot.vfs.input.height", + CopilotVfsInputMediaTypeClaimed: "copilot.vfs.input.media_type_claimed", + CopilotVfsInputMediaTypeDetected: "copilot.vfs.input.media_type_detected", + CopilotVfsInputWidth: "copilot.vfs.input.width", + CopilotVfsMetadataFailed: "copilot.vfs.metadata.failed", + CopilotVfsOutcome: "copilot.vfs.outcome", + CopilotVfsOutputBytes: "copilot.vfs.output.bytes", + CopilotVfsOutputMediaType: "copilot.vfs.output.media_type", + 
CopilotVfsReadImageResized: "copilot.vfs.read.image.resized", + CopilotVfsReadOutcome: "copilot.vfs.read.outcome", + CopilotVfsReadOutputBytes: "copilot.vfs.read.output.bytes", + CopilotVfsReadOutputLines: "copilot.vfs.read.output.lines", + CopilotVfsReadOutputMediaType: "copilot.vfs.read.output.media_type", + CopilotVfsReadPath: "copilot.vfs.read.path", + CopilotVfsResizeAttempts: "copilot.vfs.resize.attempts", + CopilotVfsResizeChosenDimension: "copilot.vfs.resize.chosen_dimension", + CopilotVfsResizeChosenQuality: "copilot.vfs.resize.chosen_quality", + CopilotVfsResized: "copilot.vfs.resized", + CopilotVfsSharpLoadFailed: "copilot.vfs.sharp.load_failed", + CostDefaultCost: "cost.default_cost", + CredentialSetId: "credential_set.id", + CredentialSetName: "credential_set.name", + DbOperation: "db.operation", + DbSqlTable: "db.sql.table", + DbSystem: "db.system", + DeploymentEnvironment: "deployment.environment", + DeploymentVersion: "deployment.version", + DocumentFileSize: "document.file_size", + DocumentMimeType: "document.mime_type", + DocumentsCount: "documents.count", + DocumentsUploadType: "documents.upload_type", + Error: "error", + ErrorCode: "error.code", + ErrorInternal: "error.internal", + ErrorMessage: "error.message", + ErrorType: "error.type", + EventName: "event.name", + EventTimestamp: "event.timestamp", + ExecutionBlocksExecuted: "execution.blocks_executed", + ExecutionDurationMs: "execution.duration_ms", + ExecutionErrorMessage: "execution.error_message", + ExecutionHasErrors: "execution.has_errors", + ExecutionStatus: "execution.status", + ExecutionTotalCost: "execution.total_cost", + ExecutionTrigger: "execution.trigger", + FunctionExecutionTimeMs: "function.execution_time_ms", + FunctionId: "function.id", + FunctionName: "function.name", + GenAiAgentId: "gen_ai.agent.id", + GenAiAgentName: "gen_ai.agent.name", + GenAiCostInput: "gen_ai.cost.input", + GenAiCostOutput: "gen_ai.cost.output", + GenAiCostTotal: "gen_ai.cost.total", + 
GenAiInputMessages: "gen_ai.input.messages", + GenAiOperationName: "gen_ai.operation.name", + GenAiOutputMessages: "gen_ai.output.messages", + GenAiRequestAssistantMessages: "gen_ai.request.assistant_messages", + GenAiRequestContentBlocks: "gen_ai.request.content_blocks", + GenAiRequestHasCacheControl: "gen_ai.request.has_cache_control", + GenAiRequestImageBlocks: "gen_ai.request.image_blocks", + GenAiRequestImageDataBytes: "gen_ai.request.image_data_bytes", + GenAiRequestMaxMessageBlocks: "gen_ai.request.max_message_blocks", + GenAiRequestMessagesCount: "gen_ai.request.messages.count", + GenAiRequestModel: "gen_ai.request.model", + GenAiRequestSystemChars: "gen_ai.request.system_chars", + GenAiRequestTextBlocks: "gen_ai.request.text_blocks", + GenAiRequestToolResultBlocks: "gen_ai.request.tool_result_blocks", + GenAiRequestToolUseBlocks: "gen_ai.request.tool_use_blocks", + GenAiRequestToolsCount: "gen_ai.request.tools.count", + GenAiRequestUserMessages: "gen_ai.request.user_messages", + GenAiSystem: "gen_ai.system", + GenAiToolName: "gen_ai.tool.name", + GenAiUsageCacheCreationTokens: "gen_ai.usage.cache_creation_tokens", + GenAiUsageCacheReadTokens: "gen_ai.usage.cache_read_tokens", + GenAiUsageInputTokens: "gen_ai.usage.input_tokens", + GenAiUsageOutputTokens: "gen_ai.usage.output_tokens", + GenAiUsageTotalTokens: "gen_ai.usage.total_tokens", + GenAiWorkflowExecutionId: "gen_ai.workflow.execution_id", + GenAiWorkflowId: "gen_ai.workflow.id", + GenAiWorkflowName: "gen_ai.workflow.name", + HostedKeyEnvVar: "hosted_key.env_var", + HttpHost: "http.host", + HttpMethod: "http.method", + HttpPath: "http.path", + HttpRemoteAddr: "http.remote_addr", + HttpRequestContentLength: "http.request.content_length", + HttpResponseBodyBytes: "http.response.body_bytes", + HttpResponseContentLength: "http.response.content_length", + HttpResponseHeadersMs: "http.response.headers_ms", + HttpResponseTotalMs: "http.response.total_ms", + HttpServerDurationMs: "http.server.duration_ms", + 
HttpStatusCode: "http.status_code", + HttpTarget: "http.target", + HttpUrl: "http.url", + HttpUserAgent: "http.user_agent", + InvitationRole: "invitation.role", + KnowledgeBaseId: "knowledge_base.id", + KnowledgeBaseName: "knowledge_base.name", + LlmErrorStage: "llm.error_stage", + LlmRequestBodyBytes: "llm.request.body_bytes", + LlmStreamBytes: "llm.stream.bytes", + LlmStreamChunks: "llm.stream.chunks", + LlmStreamFirstChunkBytes: "llm.stream.first_chunk_bytes", + LlmStreamFirstChunkMs: "llm.stream.first_chunk_ms", + LlmStreamOpenMs: "llm.stream.open_ms", + LlmStreamTotalMs: "llm.stream.total_ms", + LockAcquired: "lock.acquired", + LockBackend: "lock.backend", + LockTimedOut: "lock.timed_out", + LockTimeoutMs: "lock.timeout_ms", + LoopId: "loop.id", + LoopIterations: "loop.iterations", + LoopName: "loop.name", + McpExecutionStatus: "mcp.execution_status", + McpServerId: "mcp.server_id", + McpServerName: "mcp.server_name", + McpToolName: "mcp.tool_name", + McpTransport: "mcp.transport", + MemberRole: "member.role", + MemoryContentBytes: "memory.content_bytes", + MemoryFound: "memory.found", + MemoryPath: "memory.path", + MemoryRowCount: "memory.row_count", + MessageId: "message.id", + MessagingDestinationName: "messaging.destination.name", + MessagingSystem: "messaging.system", + ModelDurationMs: "model.duration_ms", + ModelId: "model.id", + ModelName: "model.name", + MothershipOrigin: "mothership.origin", + NetPeerName: "net.peer.name", + OauthProvider: "oauth.provider", + ParallelBranches: "parallel.branches", + ParallelId: "parallel.id", + ParallelName: "parallel.name", + PrefsToolCount: "prefs.tool_count", + ProcessingChunkSize: "processing.chunk_size", + ProcessingRecipe: "processing.recipe", + ProviderId: "provider.id", + RateLimitAttempt: "rate_limit.attempt", + RateLimitCount: "rate_limit.count", + RateLimitDelayMs: "rate_limit.delay_ms", + RateLimitLimit: "rate_limit.limit", + RateLimitMaxRetries: "rate_limit.max_retries", + RateLimitOutcome: 
"rate_limit.outcome", + RateLimitRetryAfterMs: "rate_limit.retry_after_ms", + RequestGoTraceId: "request.go_trace_id", + RequestId: "request.id", + RequiredVersion: "required.version", + ResumeRequestBodyBytes: "resume.request.body_bytes", + ResumeResultsCount: "resume.results.count", + ResumeResultsDataBytes: "resume.results.data_bytes", + ResumeResultsFailureCount: "resume.results.failure_count", + ResumeResultsSuccessCount: "resume.results.success_count", + RouterBackendName: "router.backend_name", + RouterBedrockEnabled: "router.bedrock_enabled", + RouterBedrockSupportedModel: "router.bedrock_supported_model", + RouterId: "router.id", + RouterName: "router.name", + RouterSelectedBackend: "router.selected_backend", + RouterSelectedPath: "router.selected_path", + RunId: "run.id", + SearchResultsCount: "search.results_count", + ServiceInstanceId: "service.instance.id", + ServiceName: "service.name", + ServiceNamespace: "service.namespace", + ServiceVersion: "service.version", + SettleCompleted: "settle.completed", + SettleTimeoutMs: "settle.timeout_ms", + SettleWaitMs: "settle.wait_ms", + SimOperation: "sim.operation", + SimRequestId: "sim.request_id", + SpanDurationMs: "span.duration_ms", + SpanStatus: "span.status", + SpanType: "span.type", + StreamId: "stream.id", + SubagentId: "subagent.id", + SubagentOutcomeContentBytes: "subagent.outcome.content_bytes", + SubagentOutcomeError: "subagent.outcome.error", + SubagentOutcomeStructuredType: "subagent.outcome.structured_type", + SubagentOutcomeSuccess: "subagent.outcome.success", + SubagentOutcomeToolCallCount: "subagent.outcome.tool_call_count", + TaskAge: "task.age", + TaskDecisionCount: "task.decision_count", + TaskErrorCount: "task.error_count", + TaskFound: "task.found", + TaskId: "task.id", + TaskListLimit: "task.list_limit", + TaskRows: "task.rows", + TaskStatus: "task.status", + TaskStepCount: "task.step_count", + TelemetrySdkLanguage: "telemetry.sdk.language", + TelemetrySdkName: "telemetry.sdk.name", + 
TelemetrySdkVersion: "telemetry.sdk.version", + TemplateId: "template.id", + TemplateName: "template.name", + ThrottleReason: "throttle.reason", + ToolArgsBytes: "tool.args.bytes", + ToolArgsCount: "tool.args.count", + ToolArgsPreview: "tool.args.preview", + ToolAsyncWaiterPollCount: "tool.async_waiter.poll_count", + ToolAsyncWaiterPubsubDeliveries: "tool.async_waiter.pubsub_deliveries", + ToolAsyncWaiterResolution: "tool.async_waiter.resolution", + ToolCallId: "tool.call_id", + ToolClientExecutable: "tool.client_executable", + ToolCompletionReceived: "tool.completion.received", + ToolConfirmationStatus: "tool.confirmation.status", + ToolDurationMs: "tool.duration_ms", + ToolErrorKind: "tool.error_kind", + ToolExecutor: "tool.executor", + ToolExternalService: "tool.external.service", + ToolId: "tool.id", + ToolName: "tool.name", + ToolOutcome: "tool.outcome", + ToolOutcomeMessage: "tool.outcome.message", + ToolParentSpan: "tool.parent_span", + ToolPayloadBytes: "tool.payload.bytes", + ToolResultArtifact: "tool.result.artifact", + ToolResultBytes: "tool.result.bytes", + ToolResultSuccess: "tool.result.success", + ToolScheduled: "tool.scheduled", + ToolStatus: "tool.status", + ToolStatusCode: "tool.status_code", + ToolStoreStatus: "tool.store_status", + ToolSync: "tool.sync", + ToolTimeoutMs: "tool.timeout_ms", + TraceAborted: "trace.aborted", + TraceBilledTotalCost: "trace.billed_total_cost", + TraceCacheReadTokens: "trace.cache_read_tokens", + TraceCacheWriteTokens: "trace.cache_write_tokens", + TraceDurationMs: "trace.duration_ms", + TraceError: "trace.error", + TraceGoId: "trace.go_id", + TraceInputTokens: "trace.input_tokens", + TraceModel: "trace.model", + TraceOutcome: "trace.outcome", + TraceOutputTokens: "trace.output_tokens", + TraceProvider: "trace.provider", + TraceRawTotalCost: "trace.raw_total_cost", + TraceSpanCount: "trace.span_count", + TraceToolCallCount: "trace.tool_call_count", + UserAuthMethod: "user.auth_method", + UserAuthProvider: 
"user.auth_provider", + UserId: "user.id", + WebhookId: "webhook.id", + WebhookProvider: "webhook.provider", + WebhookTriggerSuccess: "webhook.trigger_success", + WorkflowBlockTypes: "workflow.block_types", + WorkflowBlocksCount: "workflow.blocks_count", + WorkflowCreatedId: "workflow.created_id", + WorkflowDurationMs: "workflow.duration_ms", + WorkflowEdgesCount: "workflow.edges_count", + WorkflowExecutionId: "workflow.execution_id", + WorkflowHasFolder: "workflow.has_folder", + WorkflowHasWorkspace: "workflow.has_workspace", + WorkflowId: "workflow.id", + WorkflowLoopsCount: "workflow.loops_count", + WorkflowName: "workflow.name", + WorkflowNewId: "workflow.new_id", + WorkflowParallelsCount: "workflow.parallels_count", + WorkflowSourceId: "workflow.source_id", + WorkflowTrigger: "workflow.trigger", + WorkspaceId: "workspace.id", + WorkspaceName: "workspace.name", +} as const; -export type TraceAttrKey = keyof typeof TraceAttr -export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey] +export type TraceAttrKey = keyof typeof TraceAttr; +export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey]; /** Readonly sorted list of every canonical custom attribute key. 
*/ export const TraceAttrValues: readonly TraceAttrValue[] = [ - 'abort.backend', - 'abort.found', - 'abort.redis_result', - 'analytics.aborted', - 'analytics.billed_total_cost', - 'analytics.cache_read_tokens', - 'analytics.cache_write_tokens', - 'analytics.customer_type', - 'analytics.duration_ms', - 'analytics.error', - 'analytics.input_tokens', - 'analytics.model', - 'analytics.output_tokens', - 'analytics.provider', - 'analytics.source', - 'analytics.tool_call_count', - 'api_key.id', - 'api_key.name', - 'auth.incoming_internal', - 'auth.key.match', - 'auth.key.preview', - 'auth.key.source', - 'auth.key.type', - 'auth.provider', - 'auth.validate.status_code', - 'aws.region', - 'bedrock.error_code', - 'bedrock.model_id', - 'bedrock.request.body_bytes_retry', - 'billing.attempts', - 'billing.change_type', - 'billing.cost.input_usd', - 'billing.cost.output_usd', - 'billing.cost.total_usd', - 'billing.cost_usd', - 'billing.customer_type', - 'billing.duplicate', - 'billing.duration_ms', - 'billing.has_idempotency_key', - 'billing.idempotency_key', - 'billing.interval', - 'billing.is_mcp', - 'billing.llm_cost', - 'billing.new_plan', - 'billing.outcome', - 'billing.plan', - 'billing.previous_plan', - 'billing.service_charges', - 'billing.source', - 'billing.total_cost', - 'billing.usage.current', - 'billing.usage.exceeded', - 'billing.usage.limit', - 'block.id', - 'block.name', - 'block.type', - 'chat.active_messages_bytes', - 'chat.active_messages_count', - 'chat.append_bytes', - 'chat.append_count', - 'chat.artifact_keys', - 'chat.artifacts_bytes', - 'chat.auth_type', - 'chat.context_count', - 'chat.context_usage', - 'chat.continuation.messages_before', - 'chat.continuation.tool_result_bytes', - 'chat.continuation.tool_result_failure', - 'chat.continuation.tool_result_success', - 'chat.continuation.tool_results', - 'chat.continuation.total_tool_calls', - 'chat.existing_message_count', - 'chat.file_attachment_count', - 'chat.finalize.outcome', - 'chat.found', - 
'chat.has_assistant_message', - 'chat.has_output_configs', - 'chat.id', - 'chat.message_bytes', - 'chat.messages_after', - 'chat.messages_bytes', - 'chat.messages_count', - 'chat.persist.outcome', - 'chat.preexisting', - 'chat.rollback_index', - 'chat.tokens_used', - 'chat.type', - 'chat.user_message_id', - 'checkpoint.age', - 'checkpoint.attempts_bytes', - 'checkpoint.bytes.assistant_tool_use', - 'checkpoint.bytes.current_messages', - 'checkpoint.bytes.immediate_results', - 'checkpoint.bytes.pending_tool_calls', - 'checkpoint.bytes.provider_request', - 'checkpoint.bytes.request_context', - 'checkpoint.bytes.tool_usage', - 'checkpoint.cached_credentials_bytes', - 'checkpoint.claimed', - 'checkpoint.claimed_now', - 'checkpoint.completed_bytes', - 'checkpoint.completed_steps', - 'checkpoint.current_messages', - 'checkpoint.decisions_bytes', - 'checkpoint.found', - 'checkpoint.frames', - 'checkpoint.id', - 'checkpoint.immediate_results', - 'checkpoint.message_id', - 'checkpoint.pending_bytes', - 'checkpoint.pending_steps', - 'checkpoint.pending_tool_count', - 'checkpoint.rows', - 'checkpoint.task_id', - 'checkpoint.total_tool_calls', - 'checkpoint.workflow_snapshot_bytes', - 'client.version', - 'condition.id', - 'condition.name', - 'condition.result', - 'context.reduce.budget_chars', - 'context.reduce.caller', - 'context.reduce.did_reduce', - 'context.reduce.input_chars', - 'context.reduce.input_messages', - 'context.reduce.outcome', - 'context.reduce.output_chars', - 'context.reduce.output_messages', - 'context.reduced', - 'context.summarize.input_chars', - 'context.summarize.output_chars', - 'copilot.abort.go_marker_ok', - 'copilot.abort.local_aborted', - 'copilot.abort.outcome', - 'copilot.async_tool.claimed_by', - 'copilot.async_tool.has_error', - 'copilot.async_tool.ids_count', - 'copilot.async_tool.status', - 'copilot.async_tool.worker_id', - 'copilot.branch.kind', - 'copilot.chat.is_new', - 'copilot.commands.count', - 'copilot.confirm.outcome', - 
'copilot.contexts.count', - 'copilot.file_attachments.count', - 'copilot.finalize.outcome', - 'copilot.interrupted_prior_stream', - 'copilot.leg', - 'copilot.mode', - 'copilot.operation', - 'copilot.output_file.bytes', - 'copilot.output_file.format', - 'copilot.output_file.id', - 'copilot.output_file.name', - 'copilot.output_file.outcome', - 'copilot.pending_stream.wait_ms', - 'copilot.prefetch', - 'copilot.publisher.client_disconnected', - 'copilot.publisher.saw_complete', - 'copilot.recovery.latest_seq', - 'copilot.recovery.oldest_seq', - 'copilot.recovery.outcome', - 'copilot.recovery.requested_after_seq', - 'copilot.request.outcome', - 'copilot.resource_attachments.count', - 'copilot.resources.aborted', - 'copilot.resources.op', - 'copilot.resources.removed_count', - 'copilot.resources.upserted_count', - 'copilot.result.content_blocks', - 'copilot.result.content_length', - 'copilot.result.tool_calls', - 'copilot.resume.after_cursor', - 'copilot.resume.duration_ms', - 'copilot.resume.event_count', - 'copilot.resume.outcome', - 'copilot.resume.poll_iterations', - 'copilot.resume.preview_session_count', - 'copilot.route', - 'copilot.run.agent', - 'copilot.run.has_completed_at', - 'copilot.run.has_error', - 'copilot.run.model', - 'copilot.run.parent_id', - 'copilot.run.provider', - 'copilot.run.status', - 'copilot.stop.appended_assistant', - 'copilot.stop.blocks_count', - 'copilot.stop.content_length', - 'copilot.stop.outcome', - 'copilot.stream', - 'copilot.surface', - 'copilot.table.id', - 'copilot.table.outcome', - 'copilot.table.row_count', - 'copilot.table.source.content_bytes', - 'copilot.table.source.format', - 'copilot.table.source.path', - 'copilot.trace.span_count', - 'copilot.transport', - 'copilot.user.message_preview', - 'copilot.validate.outcome', - 'copilot.vfs.file.extension', - 'copilot.vfs.file.media_type', - 'copilot.vfs.file.name', - 'copilot.vfs.file.size_bytes', - 'copilot.vfs.has_alpha', - 'copilot.vfs.input.bytes', - 
'copilot.vfs.input.height', - 'copilot.vfs.input.media_type_claimed', - 'copilot.vfs.input.media_type_detected', - 'copilot.vfs.input.width', - 'copilot.vfs.metadata.failed', - 'copilot.vfs.outcome', - 'copilot.vfs.output.bytes', - 'copilot.vfs.output.media_type', - 'copilot.vfs.read.image.resized', - 'copilot.vfs.read.outcome', - 'copilot.vfs.read.output.bytes', - 'copilot.vfs.read.output.lines', - 'copilot.vfs.read.output.media_type', - 'copilot.vfs.read.path', - 'copilot.vfs.resize.attempts', - 'copilot.vfs.resize.chosen_dimension', - 'copilot.vfs.resize.chosen_quality', - 'copilot.vfs.resized', - 'copilot.vfs.sharp.load_failed', - 'cost.default_cost', - 'credential_set.id', - 'credential_set.name', - 'db.operation', - 'db.sql.table', - 'db.system', - 'deployment.environment', - 'deployment.version', - 'document.file_size', - 'document.mime_type', - 'documents.count', - 'documents.upload_type', - 'error', - 'error.code', - 'error.internal', - 'error.type', - 'event.name', - 'event.timestamp', - 'execution.blocks_executed', - 'execution.duration_ms', - 'execution.error_message', - 'execution.has_errors', - 'execution.status', - 'execution.total_cost', - 'execution.trigger', - 'function.execution_time_ms', - 'function.id', - 'function.name', - 'gen_ai.agent.id', - 'gen_ai.agent.name', - 'gen_ai.cost.input', - 'gen_ai.cost.output', - 'gen_ai.cost.total', - 'gen_ai.input.messages', - 'gen_ai.operation.name', - 'gen_ai.output.messages', - 'gen_ai.request.assistant_messages', - 'gen_ai.request.content_blocks', - 'gen_ai.request.has_cache_control', - 'gen_ai.request.image_blocks', - 'gen_ai.request.image_data_bytes', - 'gen_ai.request.max_message_blocks', - 'gen_ai.request.messages.count', - 'gen_ai.request.model', - 'gen_ai.request.system_chars', - 'gen_ai.request.text_blocks', - 'gen_ai.request.tool_result_blocks', - 'gen_ai.request.tool_use_blocks', - 'gen_ai.request.tools.count', - 'gen_ai.request.user_messages', - 'gen_ai.system', - 'gen_ai.tool.name', - 
'gen_ai.usage.cache_creation_tokens', - 'gen_ai.usage.cache_read_tokens', - 'gen_ai.usage.input_tokens', - 'gen_ai.usage.output_tokens', - 'gen_ai.usage.total_tokens', - 'gen_ai.workflow.execution_id', - 'gen_ai.workflow.id', - 'gen_ai.workflow.name', - 'hosted_key.env_var', - 'http.host', - 'http.method', - 'http.path', - 'http.remote_addr', - 'http.request.content_length', - 'http.response.body_bytes', - 'http.response.content_length', - 'http.response.headers_ms', - 'http.response.total_ms', - 'http.server.duration_ms', - 'http.status_code', - 'http.target', - 'http.url', - 'http.user_agent', - 'invitation.role', - 'knowledge_base.id', - 'knowledge_base.name', - 'llm.error_stage', - 'llm.request.body_bytes', - 'llm.stream.bytes', - 'llm.stream.chunks', - 'llm.stream.first_chunk_bytes', - 'llm.stream.first_chunk_ms', - 'llm.stream.open_ms', - 'llm.stream.total_ms', - 'lock.acquired', - 'lock.backend', - 'lock.timed_out', - 'lock.timeout_ms', - 'loop.id', - 'loop.iterations', - 'loop.name', - 'mcp.execution_status', - 'mcp.server_id', - 'mcp.server_name', - 'mcp.tool_name', - 'mcp.transport', - 'member.role', - 'memory.content_bytes', - 'memory.found', - 'memory.path', - 'memory.row_count', - 'message.id', - 'messaging.destination.name', - 'messaging.system', - 'model.duration_ms', - 'model.id', - 'model.name', - 'mothership.origin', - 'net.peer.name', - 'oauth.provider', - 'parallel.branches', - 'parallel.id', - 'parallel.name', - 'prefs.tool_count', - 'processing.chunk_size', - 'processing.recipe', - 'provider.id', - 'rate_limit.attempt', - 'rate_limit.count', - 'rate_limit.delay_ms', - 'rate_limit.limit', - 'rate_limit.max_retries', - 'rate_limit.outcome', - 'rate_limit.retry_after_ms', - 'request.go_trace_id', - 'request.id', - 'required.version', - 'resume.request.body_bytes', - 'resume.results.count', - 'resume.results.data_bytes', - 'resume.results.failure_count', - 'resume.results.success_count', - 'router.backend_name', - 'router.bedrock_enabled', - 
'router.bedrock_supported_model', - 'router.id', - 'router.name', - 'router.selected_backend', - 'router.selected_path', - 'run.id', - 'search.results_count', - 'service.instance.id', - 'service.name', - 'service.namespace', - 'service.version', - 'settle.completed', - 'settle.timeout_ms', - 'settle.wait_ms', - 'sim.operation', - 'sim.request_id', - 'span.duration_ms', - 'span.status', - 'span.type', - 'stream.id', - 'subagent.id', - 'subagent.outcome.content_bytes', - 'subagent.outcome.error', - 'subagent.outcome.structured_type', - 'subagent.outcome.success', - 'subagent.outcome.tool_call_count', - 'task.age', - 'task.decision_count', - 'task.error_count', - 'task.found', - 'task.id', - 'task.list_limit', - 'task.rows', - 'task.status', - 'task.step_count', - 'telemetry.sdk.language', - 'telemetry.sdk.name', - 'telemetry.sdk.version', - 'template.id', - 'template.name', - 'throttle.reason', - 'tool.args.bytes', - 'tool.args.count', - 'tool.args.preview', - 'tool.async_waiter.poll_count', - 'tool.async_waiter.pubsub_deliveries', - 'tool.async_waiter.resolution', - 'tool.call_id', - 'tool.client_executable', - 'tool.completion.received', - 'tool.confirmation.status', - 'tool.duration_ms', - 'tool.error_kind', - 'tool.executor', - 'tool.external.service', - 'tool.id', - 'tool.name', - 'tool.outcome', - 'tool.outcome.message', - 'tool.parent_span', - 'tool.payload.bytes', - 'tool.result.artifact', - 'tool.result.bytes', - 'tool.result.success', - 'tool.scheduled', - 'tool.status', - 'tool.status_code', - 'tool.store_status', - 'tool.sync', - 'tool.timeout_ms', - 'trace.aborted', - 'trace.billed_total_cost', - 'trace.cache_read_tokens', - 'trace.cache_write_tokens', - 'trace.duration_ms', - 'trace.error', - 'trace.go_id', - 'trace.input_tokens', - 'trace.model', - 'trace.outcome', - 'trace.output_tokens', - 'trace.provider', - 'trace.raw_total_cost', - 'trace.span_count', - 'trace.tool_call_count', - 'user.auth_method', - 'user.auth_provider', - 'user.id', - 
'webhook.id', - 'webhook.provider', - 'webhook.trigger_success', - 'workflow.block_types', - 'workflow.blocks_count', - 'workflow.created_id', - 'workflow.duration_ms', - 'workflow.edges_count', - 'workflow.execution_id', - 'workflow.has_folder', - 'workflow.has_workspace', - 'workflow.id', - 'workflow.loops_count', - 'workflow.name', - 'workflow.new_id', - 'workflow.parallels_count', - 'workflow.source_id', - 'workflow.trigger', - 'workspace.id', - 'workspace.name', -] as const + "abort.backend", + "abort.found", + "abort.redis_result", + "analytics.aborted", + "analytics.billed_total_cost", + "analytics.cache_read_tokens", + "analytics.cache_write_tokens", + "analytics.customer_type", + "analytics.duration_ms", + "analytics.error", + "analytics.input_tokens", + "analytics.model", + "analytics.output_tokens", + "analytics.provider", + "analytics.source", + "analytics.tool_call_count", + "api_key.id", + "api_key.name", + "auth.incoming_internal", + "auth.key.match", + "auth.key.preview", + "auth.key.source", + "auth.key.type", + "auth.provider", + "auth.validate.status_code", + "aws.region", + "bedrock.error_code", + "bedrock.model_id", + "bedrock.request.body_bytes_retry", + "billing.attempts", + "billing.change_type", + "billing.cost.input_usd", + "billing.cost.output_usd", + "billing.cost.total_usd", + "billing.cost_usd", + "billing.customer_type", + "billing.duplicate", + "billing.duration_ms", + "billing.has_idempotency_key", + "billing.idempotency_key", + "billing.interval", + "billing.is_mcp", + "billing.llm_cost", + "billing.new_plan", + "billing.outcome", + "billing.plan", + "billing.previous_plan", + "billing.service_charges", + "billing.source", + "billing.total_cost", + "billing.usage.current", + "billing.usage.exceeded", + "billing.usage.limit", + "block.id", + "block.name", + "block.type", + "chat.active_messages_bytes", + "chat.active_messages_count", + "chat.append_bytes", + "chat.append_count", + "chat.artifact_keys", + "chat.artifacts_bytes", + 
"chat.auth_type", + "chat.context_count", + "chat.context_usage", + "chat.continuation.messages_before", + "chat.continuation.tool_result_bytes", + "chat.continuation.tool_result_failure", + "chat.continuation.tool_result_success", + "chat.continuation.tool_results", + "chat.continuation.total_tool_calls", + "chat.existing_message_count", + "chat.file_attachment_count", + "chat.finalize.outcome", + "chat.found", + "chat.has_assistant_message", + "chat.has_output_configs", + "chat.id", + "chat.message_bytes", + "chat.messages_after", + "chat.messages_bytes", + "chat.messages_count", + "chat.persist.outcome", + "chat.preexisting", + "chat.rollback_index", + "chat.tokens_used", + "chat.type", + "chat.user_message_id", + "checkpoint.age", + "checkpoint.attempts_bytes", + "checkpoint.bytes.assistant_tool_use", + "checkpoint.bytes.current_messages", + "checkpoint.bytes.immediate_results", + "checkpoint.bytes.pending_tool_calls", + "checkpoint.bytes.provider_request", + "checkpoint.bytes.request_context", + "checkpoint.bytes.tool_usage", + "checkpoint.cached_credentials_bytes", + "checkpoint.claimed", + "checkpoint.claimed_now", + "checkpoint.completed_bytes", + "checkpoint.completed_steps", + "checkpoint.current_messages", + "checkpoint.decisions_bytes", + "checkpoint.found", + "checkpoint.frames", + "checkpoint.id", + "checkpoint.immediate_results", + "checkpoint.message_id", + "checkpoint.pending_bytes", + "checkpoint.pending_steps", + "checkpoint.pending_tool_count", + "checkpoint.rows", + "checkpoint.task_id", + "checkpoint.total_tool_calls", + "checkpoint.workflow_snapshot_bytes", + "client.version", + "condition.id", + "condition.name", + "condition.result", + "context.reduce.budget_chars", + "context.reduce.caller", + "context.reduce.did_reduce", + "context.reduce.input_chars", + "context.reduce.input_messages", + "context.reduce.outcome", + "context.reduce.output_chars", + "context.reduce.output_messages", + "context.reduced", + "context.summarize.input_chars", + 
"context.summarize.output_chars", + "copilot.abort.controller_fired", + "copilot.abort.go_marker_ok", + "copilot.abort.local_aborted", + "copilot.abort.marker_written", + "copilot.abort.outcome", + "copilot.abort.unknown_reason", + "copilot.async_tool.claimed_by", + "copilot.async_tool.has_error", + "copilot.async_tool.ids_count", + "copilot.async_tool.status", + "copilot.async_tool.worker_id", + "copilot.branch.kind", + "copilot.chat.is_new", + "copilot.commands.count", + "copilot.confirm.outcome", + "copilot.contexts.count", + "copilot.file_attachments.count", + "copilot.finalize.outcome", + "copilot.interrupted_prior_stream", + "copilot.leg", + "copilot.mode", + "copilot.operation", + "copilot.output_file.bytes", + "copilot.output_file.format", + "copilot.output_file.id", + "copilot.output_file.name", + "copilot.output_file.outcome", + "copilot.pending_stream.wait_ms", + "copilot.prefetch", + "copilot.publisher.client_disconnected", + "copilot.publisher.saw_complete", + "copilot.recovery.latest_seq", + "copilot.recovery.oldest_seq", + "copilot.recovery.outcome", + "copilot.recovery.requested_after_seq", + "copilot.request.cancel_reason", + "copilot.request.outcome", + "copilot.resource_attachments.count", + "copilot.resources.aborted", + "copilot.resources.op", + "copilot.resources.removed_count", + "copilot.resources.upserted_count", + "copilot.result.content_blocks", + "copilot.result.content_length", + "copilot.result.tool_calls", + "copilot.resume.after_cursor", + "copilot.resume.duration_ms", + "copilot.resume.event_count", + "copilot.resume.outcome", + "copilot.resume.poll_iterations", + "copilot.resume.preview_session_count", + "copilot.route", + "copilot.run.agent", + "copilot.run.has_completed_at", + "copilot.run.has_error", + "copilot.run.model", + "copilot.run.parent_id", + "copilot.run.provider", + "copilot.run.status", + "copilot.stop.appended_assistant", + "copilot.stop.blocks_count", + "copilot.stop.content_length", + "copilot.stop.outcome", + 
"copilot.stream", + "copilot.surface", + "copilot.table.id", + "copilot.table.outcome", + "copilot.table.row_count", + "copilot.table.source.content_bytes", + "copilot.table.source.format", + "copilot.table.source.path", + "copilot.trace.span_count", + "copilot.transport", + "copilot.user.message_preview", + "copilot.validate.outcome", + "copilot.vfs.file.extension", + "copilot.vfs.file.media_type", + "copilot.vfs.file.name", + "copilot.vfs.file.size_bytes", + "copilot.vfs.has_alpha", + "copilot.vfs.input.bytes", + "copilot.vfs.input.height", + "copilot.vfs.input.media_type_claimed", + "copilot.vfs.input.media_type_detected", + "copilot.vfs.input.width", + "copilot.vfs.metadata.failed", + "copilot.vfs.outcome", + "copilot.vfs.output.bytes", + "copilot.vfs.output.media_type", + "copilot.vfs.read.image.resized", + "copilot.vfs.read.outcome", + "copilot.vfs.read.output.bytes", + "copilot.vfs.read.output.lines", + "copilot.vfs.read.output.media_type", + "copilot.vfs.read.path", + "copilot.vfs.resize.attempts", + "copilot.vfs.resize.chosen_dimension", + "copilot.vfs.resize.chosen_quality", + "copilot.vfs.resized", + "copilot.vfs.sharp.load_failed", + "cost.default_cost", + "credential_set.id", + "credential_set.name", + "db.operation", + "db.sql.table", + "db.system", + "deployment.environment", + "deployment.version", + "document.file_size", + "document.mime_type", + "documents.count", + "documents.upload_type", + "error", + "error.code", + "error.internal", + "error.message", + "error.type", + "event.name", + "event.timestamp", + "execution.blocks_executed", + "execution.duration_ms", + "execution.error_message", + "execution.has_errors", + "execution.status", + "execution.total_cost", + "execution.trigger", + "function.execution_time_ms", + "function.id", + "function.name", + "gen_ai.agent.id", + "gen_ai.agent.name", + "gen_ai.cost.input", + "gen_ai.cost.output", + "gen_ai.cost.total", + "gen_ai.input.messages", + "gen_ai.operation.name", + "gen_ai.output.messages", 
+ "gen_ai.request.assistant_messages", + "gen_ai.request.content_blocks", + "gen_ai.request.has_cache_control", + "gen_ai.request.image_blocks", + "gen_ai.request.image_data_bytes", + "gen_ai.request.max_message_blocks", + "gen_ai.request.messages.count", + "gen_ai.request.model", + "gen_ai.request.system_chars", + "gen_ai.request.text_blocks", + "gen_ai.request.tool_result_blocks", + "gen_ai.request.tool_use_blocks", + "gen_ai.request.tools.count", + "gen_ai.request.user_messages", + "gen_ai.system", + "gen_ai.tool.name", + "gen_ai.usage.cache_creation_tokens", + "gen_ai.usage.cache_read_tokens", + "gen_ai.usage.input_tokens", + "gen_ai.usage.output_tokens", + "gen_ai.usage.total_tokens", + "gen_ai.workflow.execution_id", + "gen_ai.workflow.id", + "gen_ai.workflow.name", + "hosted_key.env_var", + "http.host", + "http.method", + "http.path", + "http.remote_addr", + "http.request.content_length", + "http.response.body_bytes", + "http.response.content_length", + "http.response.headers_ms", + "http.response.total_ms", + "http.server.duration_ms", + "http.status_code", + "http.target", + "http.url", + "http.user_agent", + "invitation.role", + "knowledge_base.id", + "knowledge_base.name", + "llm.error_stage", + "llm.request.body_bytes", + "llm.stream.bytes", + "llm.stream.chunks", + "llm.stream.first_chunk_bytes", + "llm.stream.first_chunk_ms", + "llm.stream.open_ms", + "llm.stream.total_ms", + "lock.acquired", + "lock.backend", + "lock.timed_out", + "lock.timeout_ms", + "loop.id", + "loop.iterations", + "loop.name", + "mcp.execution_status", + "mcp.server_id", + "mcp.server_name", + "mcp.tool_name", + "mcp.transport", + "member.role", + "memory.content_bytes", + "memory.found", + "memory.path", + "memory.row_count", + "message.id", + "messaging.destination.name", + "messaging.system", + "model.duration_ms", + "model.id", + "model.name", + "mothership.origin", + "net.peer.name", + "oauth.provider", + "parallel.branches", + "parallel.id", + "parallel.name", + 
"prefs.tool_count", + "processing.chunk_size", + "processing.recipe", + "provider.id", + "rate_limit.attempt", + "rate_limit.count", + "rate_limit.delay_ms", + "rate_limit.limit", + "rate_limit.max_retries", + "rate_limit.outcome", + "rate_limit.retry_after_ms", + "request.go_trace_id", + "request.id", + "required.version", + "resume.request.body_bytes", + "resume.results.count", + "resume.results.data_bytes", + "resume.results.failure_count", + "resume.results.success_count", + "router.backend_name", + "router.bedrock_enabled", + "router.bedrock_supported_model", + "router.id", + "router.name", + "router.selected_backend", + "router.selected_path", + "run.id", + "search.results_count", + "service.instance.id", + "service.name", + "service.namespace", + "service.version", + "settle.completed", + "settle.timeout_ms", + "settle.wait_ms", + "sim.operation", + "sim.request_id", + "span.duration_ms", + "span.status", + "span.type", + "stream.id", + "subagent.id", + "subagent.outcome.content_bytes", + "subagent.outcome.error", + "subagent.outcome.structured_type", + "subagent.outcome.success", + "subagent.outcome.tool_call_count", + "task.age", + "task.decision_count", + "task.error_count", + "task.found", + "task.id", + "task.list_limit", + "task.rows", + "task.status", + "task.step_count", + "telemetry.sdk.language", + "telemetry.sdk.name", + "telemetry.sdk.version", + "template.id", + "template.name", + "throttle.reason", + "tool.args.bytes", + "tool.args.count", + "tool.args.preview", + "tool.async_waiter.poll_count", + "tool.async_waiter.pubsub_deliveries", + "tool.async_waiter.resolution", + "tool.call_id", + "tool.client_executable", + "tool.completion.received", + "tool.confirmation.status", + "tool.duration_ms", + "tool.error_kind", + "tool.executor", + "tool.external.service", + "tool.id", + "tool.name", + "tool.outcome", + "tool.outcome.message", + "tool.parent_span", + "tool.payload.bytes", + "tool.result.artifact", + "tool.result.bytes", + 
"tool.result.success", + "tool.scheduled", + "tool.status", + "tool.status_code", + "tool.store_status", + "tool.sync", + "tool.timeout_ms", + "trace.aborted", + "trace.billed_total_cost", + "trace.cache_read_tokens", + "trace.cache_write_tokens", + "trace.duration_ms", + "trace.error", + "trace.go_id", + "trace.input_tokens", + "trace.model", + "trace.outcome", + "trace.output_tokens", + "trace.provider", + "trace.raw_total_cost", + "trace.span_count", + "trace.tool_call_count", + "user.auth_method", + "user.auth_provider", + "user.id", + "webhook.id", + "webhook.provider", + "webhook.trigger_success", + "workflow.block_types", + "workflow.blocks_count", + "workflow.created_id", + "workflow.duration_ms", + "workflow.edges_count", + "workflow.execution_id", + "workflow.has_folder", + "workflow.has_workspace", + "workflow.id", + "workflow.loops_count", + "workflow.name", + "workflow.new_id", + "workflow.parallels_count", + "workflow.source_id", + "workflow.trigger", + "workspace.id", + "workspace.name", +] as const; diff --git a/apps/sim/lib/copilot/generated/trace-events-v1.ts b/apps/sim/lib/copilot/generated/trace-events-v1.ts new file mode 100644 index 00000000000..2a23d7e3f38 --- /dev/null +++ b/apps/sim/lib/copilot/generated/trace-events-v1.ts @@ -0,0 +1,44 @@ +// AUTO-GENERATED FILE. DO NOT EDIT. +// +// Source: copilot/copilot/contracts/trace-events-v1.schema.json +// Regenerate with: bun run trace-events-contract:generate +// +// Canonical mothership OTel span event names. Call sites should +// reference `TraceEvent.` (e.g. +// `TraceEvent.RequestCancelled`) rather than raw string literals, +// so the Go-side contract is the single source of truth and typos +// become compile errors. 
+ +export const TraceEvent = { + BedrockInvokeRetryWithoutImages: "bedrock.invoke.retry_without_images", + CopilotOutputFileError: "copilot.output_file.error", + CopilotTableError: "copilot.table.error", + CopilotVfsParseFailed: "copilot.vfs.parse_failed", + CopilotVfsResizeAttempt: "copilot.vfs.resize_attempt", + CopilotVfsResizeAttemptFailed: "copilot.vfs.resize_attempt_failed", + LlmInvokeSent: "llm.invoke.sent", + LlmStreamFirstChunk: "llm.stream.first_chunk", + LlmStreamOpened: "llm.stream.opened", + PgNotifyFailed: "pg_notify_failed", + RedisSubscribed: "redis.subscribed", + RequestCancelled: "request.cancelled", +} as const; + +export type TraceEventKey = keyof typeof TraceEvent; +export type TraceEventValue = (typeof TraceEvent)[TraceEventKey]; + +/** Readonly sorted list of every canonical event name. */ +export const TraceEventValues: readonly TraceEventValue[] = [ + "bedrock.invoke.retry_without_images", + "copilot.output_file.error", + "copilot.table.error", + "copilot.vfs.parse_failed", + "copilot.vfs.resize_attempt", + "copilot.vfs.resize_attempt_failed", + "llm.invoke.sent", + "llm.stream.first_chunk", + "llm.stream.opened", + "pg_notify_failed", + "redis.subscribed", + "request.cancelled", +] as const; diff --git a/apps/sim/lib/copilot/generated/trace-spans-v1.ts b/apps/sim/lib/copilot/generated/trace-spans-v1.ts index 1234b81d3a0..7c767e7735c 100644 --- a/apps/sim/lib/copilot/generated/trace-spans-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-spans-v1.ts @@ -9,137 +9,143 @@ // single source of truth and typos become compile errors. 
export const TraceSpan = { - AnthropicCountTokens: 'anthropic.count_tokens', - AsyncToolStoreSet: 'async_tool_store.set', - AuthRateLimitRecord: 'auth.rate_limit.record', - AuthValidateKey: 'auth.validate_key', - ChatContinueWithToolResults: 'chat.continue_with_tool_results', - ChatExplicitAbortConsume: 'chat.explicit_abort.consume', - ChatExplicitAbortMark: 'chat.explicit_abort.mark', - ChatExplicitAbortPeek: 'chat.explicit_abort.peek', - ChatGateAcquire: 'chat.gate.acquire', - ChatPersistAfterDone: 'chat.persist_after_done', - ChatSetup: 'chat.setup', - ContextReduce: 'context.reduce', - ContextSummarizeChunk: 'context.summarize_chunk', - CopilotAnalyticsFlush: 'copilot.analytics.flush', - CopilotAnalyticsSaveRequest: 'copilot.analytics.save_request', - CopilotAnalyticsUpdateBilling: 'copilot.analytics.update_billing', - CopilotAsyncRunsClaimCompleted: 'copilot.async_runs.claim_completed', - CopilotAsyncRunsCreateRunCheckpoint: 'copilot.async_runs.create_run_checkpoint', - CopilotAsyncRunsCreateRunSegment: 'copilot.async_runs.create_run_segment', - CopilotAsyncRunsGetAsyncToolCall: 'copilot.async_runs.get_async_tool_call', - CopilotAsyncRunsGetLatestForExecution: 'copilot.async_runs.get_latest_for_execution', - CopilotAsyncRunsGetLatestForStream: 'copilot.async_runs.get_latest_for_stream', - CopilotAsyncRunsGetMany: 'copilot.async_runs.get_many', - CopilotAsyncRunsGetRunSegment: 'copilot.async_runs.get_run_segment', - CopilotAsyncRunsListForRun: 'copilot.async_runs.list_for_run', - CopilotAsyncRunsMarkAsyncToolStatus: 'copilot.async_runs.mark_async_tool_status', - CopilotAsyncRunsReleaseClaim: 'copilot.async_runs.release_claim', - CopilotAsyncRunsUpdateRunStatus: 'copilot.async_runs.update_run_status', - CopilotAsyncRunsUpsertAsyncToolCall: 'copilot.async_runs.upsert_async_tool_call', - CopilotAuthValidateApiKey: 'copilot.auth.validate_api_key', - CopilotBillingUpdateCost: 'copilot.billing.update_cost', - CopilotChatAbortStream: 'copilot.chat.abort_stream', - 
CopilotChatAbortWaitSettle: 'copilot.chat.abort_wait_settle', - CopilotChatAcquirePendingStreamLock: 'copilot.chat.acquire_pending_stream_lock', - CopilotChatBuildExecutionContext: 'copilot.chat.build_execution_context', - CopilotChatBuildPayload: 'copilot.chat.build_payload', - CopilotChatBuildWorkspaceContext: 'copilot.chat.build_workspace_context', - CopilotChatFinalizeAssistantTurn: 'copilot.chat.finalize_assistant_turn', - CopilotChatPersistUserMessage: 'copilot.chat.persist_user_message', - CopilotChatResolveAgentContexts: 'copilot.chat.resolve_agent_contexts', - CopilotChatResolveBranch: 'copilot.chat.resolve_branch', - CopilotChatResolveOrCreateChat: 'copilot.chat.resolve_or_create_chat', - CopilotChatStopStream: 'copilot.chat.stop_stream', - CopilotConfirmToolResult: 'copilot.confirm.tool_result', - CopilotFinalizeStream: 'copilot.finalize_stream', - CopilotRecoveryCheckReplayGap: 'copilot.recovery.check_replay_gap', - CopilotResumeRequest: 'copilot.resume.request', - CopilotSubagentExecute: 'copilot.subagent.execute', - CopilotToolWaitForClientResult: 'copilot.tool.wait_for_client_result', - CopilotToolsHandleResourceSideEffects: 'copilot.tools.handle_resource_side_effects', - CopilotToolsWriteCsvToTable: 'copilot.tools.write_csv_to_table', - CopilotToolsWriteOutputFile: 'copilot.tools.write_output_file', - CopilotToolsWriteOutputTable: 'copilot.tools.write_output_table', - CopilotVfsPrepareImage: 'copilot.vfs.prepare_image', - CopilotVfsReadFile: 'copilot.vfs.read_file', - GenAiAgentExecute: 'gen_ai.agent.execute', - LlmStream: 'llm.stream', - ProviderRouterCountTokens: 'provider.router.count_tokens', - ProviderRouterRoute: 'provider.router.route', - SimUpdateCost: 'sim.update_cost', - SimValidateApiKey: 'sim.validate_api_key', - ToolAsyncWaiterWait: 'tool.async_waiter.wait', - ToolExecute: 'tool.execute', -} as const + AnthropicCountTokens: "anthropic.count_tokens", + AsyncToolStoreSet: "async_tool_store.set", + AuthRateLimitRecord: 
"auth.rate_limit.record", + AuthValidateKey: "auth.validate_key", + ChatContinueWithToolResults: "chat.continue_with_tool_results", + ChatExplicitAbortConsume: "chat.explicit_abort.consume", + ChatExplicitAbortFlushPausedBilling: "chat.explicit_abort.flush_paused_billing", + ChatExplicitAbortHandle: "chat.explicit_abort.handle", + ChatExplicitAbortMark: "chat.explicit_abort.mark", + ChatExplicitAbortPeek: "chat.explicit_abort.peek", + ChatGateAcquire: "chat.gate.acquire", + ChatPersistAfterDone: "chat.persist_after_done", + ChatSetup: "chat.setup", + ContextReduce: "context.reduce", + ContextSummarizeChunk: "context.summarize_chunk", + CopilotAnalyticsFlush: "copilot.analytics.flush", + CopilotAnalyticsSaveRequest: "copilot.analytics.save_request", + CopilotAnalyticsUpdateBilling: "copilot.analytics.update_billing", + CopilotAsyncRunsClaimCompleted: "copilot.async_runs.claim_completed", + CopilotAsyncRunsCreateRunCheckpoint: "copilot.async_runs.create_run_checkpoint", + CopilotAsyncRunsCreateRunSegment: "copilot.async_runs.create_run_segment", + CopilotAsyncRunsGetAsyncToolCall: "copilot.async_runs.get_async_tool_call", + CopilotAsyncRunsGetLatestForExecution: "copilot.async_runs.get_latest_for_execution", + CopilotAsyncRunsGetLatestForStream: "copilot.async_runs.get_latest_for_stream", + CopilotAsyncRunsGetMany: "copilot.async_runs.get_many", + CopilotAsyncRunsGetRunSegment: "copilot.async_runs.get_run_segment", + CopilotAsyncRunsListForRun: "copilot.async_runs.list_for_run", + CopilotAsyncRunsMarkAsyncToolStatus: "copilot.async_runs.mark_async_tool_status", + CopilotAsyncRunsReleaseClaim: "copilot.async_runs.release_claim", + CopilotAsyncRunsUpdateRunStatus: "copilot.async_runs.update_run_status", + CopilotAsyncRunsUpsertAsyncToolCall: "copilot.async_runs.upsert_async_tool_call", + CopilotAuthValidateApiKey: "copilot.auth.validate_api_key", + CopilotBillingUpdateCost: "copilot.billing.update_cost", + CopilotChatAbortActiveStream: 
"copilot.chat.abort_active_stream", + CopilotChatAbortStream: "copilot.chat.abort_stream", + CopilotChatAbortWaitSettle: "copilot.chat.abort_wait_settle", + CopilotChatAcquirePendingStreamLock: "copilot.chat.acquire_pending_stream_lock", + CopilotChatBuildExecutionContext: "copilot.chat.build_execution_context", + CopilotChatBuildPayload: "copilot.chat.build_payload", + CopilotChatBuildWorkspaceContext: "copilot.chat.build_workspace_context", + CopilotChatFinalizeAssistantTurn: "copilot.chat.finalize_assistant_turn", + CopilotChatPersistUserMessage: "copilot.chat.persist_user_message", + CopilotChatResolveAgentContexts: "copilot.chat.resolve_agent_contexts", + CopilotChatResolveBranch: "copilot.chat.resolve_branch", + CopilotChatResolveOrCreateChat: "copilot.chat.resolve_or_create_chat", + CopilotChatStopStream: "copilot.chat.stop_stream", + CopilotConfirmToolResult: "copilot.confirm.tool_result", + CopilotFinalizeStream: "copilot.finalize_stream", + CopilotRecoveryCheckReplayGap: "copilot.recovery.check_replay_gap", + CopilotResumeRequest: "copilot.resume.request", + CopilotSubagentExecute: "copilot.subagent.execute", + CopilotToolWaitForClientResult: "copilot.tool.wait_for_client_result", + CopilotToolsHandleResourceSideEffects: "copilot.tools.handle_resource_side_effects", + CopilotToolsWriteCsvToTable: "copilot.tools.write_csv_to_table", + CopilotToolsWriteOutputFile: "copilot.tools.write_output_file", + CopilotToolsWriteOutputTable: "copilot.tools.write_output_table", + CopilotVfsPrepareImage: "copilot.vfs.prepare_image", + CopilotVfsReadFile: "copilot.vfs.read_file", + GenAiAgentExecute: "gen_ai.agent.execute", + LlmStream: "llm.stream", + ProviderRouterCountTokens: "provider.router.count_tokens", + ProviderRouterRoute: "provider.router.route", + SimUpdateCost: "sim.update_cost", + SimValidateApiKey: "sim.validate_api_key", + ToolAsyncWaiterWait: "tool.async_waiter.wait", + ToolExecute: "tool.execute", +} as const; -export type TraceSpanKey = keyof typeof 
TraceSpan -export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey] +export type TraceSpanKey = keyof typeof TraceSpan; +export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey]; /** Readonly sorted list of every canonical span name. */ export const TraceSpanValues: readonly TraceSpanValue[] = [ - 'anthropic.count_tokens', - 'async_tool_store.set', - 'auth.rate_limit.record', - 'auth.validate_key', - 'chat.continue_with_tool_results', - 'chat.explicit_abort.consume', - 'chat.explicit_abort.mark', - 'chat.explicit_abort.peek', - 'chat.gate.acquire', - 'chat.persist_after_done', - 'chat.setup', - 'context.reduce', - 'context.summarize_chunk', - 'copilot.analytics.flush', - 'copilot.analytics.save_request', - 'copilot.analytics.update_billing', - 'copilot.async_runs.claim_completed', - 'copilot.async_runs.create_run_checkpoint', - 'copilot.async_runs.create_run_segment', - 'copilot.async_runs.get_async_tool_call', - 'copilot.async_runs.get_latest_for_execution', - 'copilot.async_runs.get_latest_for_stream', - 'copilot.async_runs.get_many', - 'copilot.async_runs.get_run_segment', - 'copilot.async_runs.list_for_run', - 'copilot.async_runs.mark_async_tool_status', - 'copilot.async_runs.release_claim', - 'copilot.async_runs.update_run_status', - 'copilot.async_runs.upsert_async_tool_call', - 'copilot.auth.validate_api_key', - 'copilot.billing.update_cost', - 'copilot.chat.abort_stream', - 'copilot.chat.abort_wait_settle', - 'copilot.chat.acquire_pending_stream_lock', - 'copilot.chat.build_execution_context', - 'copilot.chat.build_payload', - 'copilot.chat.build_workspace_context', - 'copilot.chat.finalize_assistant_turn', - 'copilot.chat.persist_user_message', - 'copilot.chat.resolve_agent_contexts', - 'copilot.chat.resolve_branch', - 'copilot.chat.resolve_or_create_chat', - 'copilot.chat.stop_stream', - 'copilot.confirm.tool_result', - 'copilot.finalize_stream', - 'copilot.recovery.check_replay_gap', - 'copilot.resume.request', - 'copilot.subagent.execute', - 
'copilot.tool.wait_for_client_result', - 'copilot.tools.handle_resource_side_effects', - 'copilot.tools.write_csv_to_table', - 'copilot.tools.write_output_file', - 'copilot.tools.write_output_table', - 'copilot.vfs.prepare_image', - 'copilot.vfs.read_file', - 'gen_ai.agent.execute', - 'llm.stream', - 'provider.router.count_tokens', - 'provider.router.route', - 'sim.update_cost', - 'sim.validate_api_key', - 'tool.async_waiter.wait', - 'tool.execute', -] as const + "anthropic.count_tokens", + "async_tool_store.set", + "auth.rate_limit.record", + "auth.validate_key", + "chat.continue_with_tool_results", + "chat.explicit_abort.consume", + "chat.explicit_abort.flush_paused_billing", + "chat.explicit_abort.handle", + "chat.explicit_abort.mark", + "chat.explicit_abort.peek", + "chat.gate.acquire", + "chat.persist_after_done", + "chat.setup", + "context.reduce", + "context.summarize_chunk", + "copilot.analytics.flush", + "copilot.analytics.save_request", + "copilot.analytics.update_billing", + "copilot.async_runs.claim_completed", + "copilot.async_runs.create_run_checkpoint", + "copilot.async_runs.create_run_segment", + "copilot.async_runs.get_async_tool_call", + "copilot.async_runs.get_latest_for_execution", + "copilot.async_runs.get_latest_for_stream", + "copilot.async_runs.get_many", + "copilot.async_runs.get_run_segment", + "copilot.async_runs.list_for_run", + "copilot.async_runs.mark_async_tool_status", + "copilot.async_runs.release_claim", + "copilot.async_runs.update_run_status", + "copilot.async_runs.upsert_async_tool_call", + "copilot.auth.validate_api_key", + "copilot.billing.update_cost", + "copilot.chat.abort_active_stream", + "copilot.chat.abort_stream", + "copilot.chat.abort_wait_settle", + "copilot.chat.acquire_pending_stream_lock", + "copilot.chat.build_execution_context", + "copilot.chat.build_payload", + "copilot.chat.build_workspace_context", + "copilot.chat.finalize_assistant_turn", + "copilot.chat.persist_user_message", + 
"copilot.chat.resolve_agent_contexts", + "copilot.chat.resolve_branch", + "copilot.chat.resolve_or_create_chat", + "copilot.chat.stop_stream", + "copilot.confirm.tool_result", + "copilot.finalize_stream", + "copilot.recovery.check_replay_gap", + "copilot.resume.request", + "copilot.subagent.execute", + "copilot.tool.wait_for_client_result", + "copilot.tools.handle_resource_side_effects", + "copilot.tools.write_csv_to_table", + "copilot.tools.write_output_file", + "copilot.tools.write_output_table", + "copilot.vfs.prepare_image", + "copilot.vfs.read_file", + "gen_ai.agent.execute", + "llm.stream", + "provider.router.count_tokens", + "provider.router.route", + "sim.update_cost", + "sim.validate_api_key", + "tool.async_waiter.wait", + "tool.execute", +] as const; diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index f0eac9d6e64..fafcce68f13 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -1,6 +1,7 @@ import { type Context, context, SpanStatusCode, trace } from '@opentelemetry/api' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { traceHeaders } from '@/lib/copilot/request/go/propagation' +import { CopilotLeg } from '@/lib/copilot/generated/trace-attribute-values-v1' // Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated // before `instrumentation-node.ts` installs the TracerProvider under @@ -47,7 +48,7 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): [TraceAttr.HttpUrl]: url, [TraceAttr.HttpTarget]: pathname, [TraceAttr.NetPeerName]: parsed?.host ?? '', - [TraceAttr.CopilotLeg]: 'sim_to_go', + [TraceAttr.CopilotLeg]: CopilotLeg.SimToGo, ...(operation ? { [TraceAttr.CopilotOperation]: operation } : {}), ...(attributes ?? 
{}), }, diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index 618cc8b7108..a6b4c7a9c0d 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -9,8 +9,16 @@ import { MothershipStreamV1EventType, MothershipStreamV1SessionKind, } from '@/lib/copilot/generated/mothership-stream-v1' -import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { + RequestTraceV1Outcome, + RequestTraceV1SpanStatus, +} from '@/lib/copilot/generated/request-trace-v1' +import { + CopilotRequestCancelReason, + type CopilotRequestCancelReasonValue, +} from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceEvent } from '@/lib/copilot/generated/trace-events-v1' import { finalizeStream } from '@/lib/copilot/request/lifecycle/finalize' import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' @@ -19,6 +27,7 @@ import { cleanupAbortMarker, clearFilePreviewSessions, registerActiveStream, + isExplicitStopReason, releasePendingChatStream, resetBuffer, StreamWriter, @@ -107,6 +116,76 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS const publisher = new StreamWriter({ streamId, chatId, requestId }) + /** + * Classifies a cancelled outcome into one of the closed-vocabulary + * `CopilotRequestCancelReason` values, and records the result on the + * active OTel root span (attribute + event). + * + * Classification rules: + * - `signal.reason` is in the known explicit-stop set (see + * `AbortReason.*`) → `ExplicitStop`. + * - Otherwise, `publisher.clientDisconnected` → `ClientDisconnect`. + * - Otherwise → `Unknown`, which is a latent bug: the stream aborted + * with a reason we don't recognize and the client never dropped. 
+ * We log an error with the raw reason and record it on the span so + * we can find whichever code path added a new `abort(...)` call + * without updating the contract. + * + * IMPORTANT: `publisher.clientDisconnected` alone is NOT a reliable + * discriminator. When the user clicks Stop, `abortActiveStream` + * fires `abortController.abort(AbortReason.UserStop)`, which closes + * the SSE stream, which causes the BROWSER to disconnect its SSE + * reader, which propagates back as `publisher.markDisconnected()`. + * So on an explicit Stop you observe BOTH the explicit reason AND + * `clientDisconnected=true`. The reason string is the source of + * truth for intent; the disconnect flag is only a fallback. + */ + const recordCancelled = (errorMessage?: string): CopilotRequestCancelReasonValue => { + const rawReason = abortController.signal.reason + let cancelReason: CopilotRequestCancelReasonValue + if (isExplicitStopReason(rawReason)) { + cancelReason = CopilotRequestCancelReason.ExplicitStop + } else if (publisher.clientDisconnected) { + cancelReason = CopilotRequestCancelReason.ClientDisconnect + } else { + cancelReason = CopilotRequestCancelReason.Unknown + const serializedReason = + rawReason === undefined + ? 'undefined' + : rawReason instanceof Error + ? `${rawReason.name}: ${rawReason.message}` + : typeof rawReason === 'string' + ? rawReason + : (() => { + try { + return JSON.stringify(rawReason) + } catch { + return String(rawReason) + } + })() + // Not user-facing. Signals a contract violation: a code path + // aborted the stream with a reason that isn't in the known set, + // and the client didn't disconnect either. Whoever sees this + // should add the new reason to `AbortReason` / `isExplicitStopReason` + // (if it's explicit) or extend the classifier. 
+ logger.error(`[${requestId}] Stream cancelled with unknown abort reason`, { + streamId, + chatId, + reason: serializedReason, + }) + activeOtelRoot.span.setAttribute( + TraceAttr.CopilotAbortUnknownReason, + serializedReason + ) + } + activeOtelRoot.span.setAttribute(TraceAttr.CopilotRequestCancelReason, cancelReason) + activeOtelRoot.span.addEvent(TraceEvent.RequestCancelled, { + [TraceAttr.CopilotRequestCancelReason]: cancelReason, + ...(errorMessage ? { [TraceAttr.ErrorMessage]: errorMessage } : {}), + }) + return cancelReason + } + const collector = new TraceCollector() return new ReadableStream({ @@ -204,6 +283,9 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS : result.success ? RequestTraceV1Outcome.success : RequestTraceV1Outcome.error + if (outcome === RequestTraceV1Outcome.cancelled) { + recordCancelled() + } await finalizeStream( result, publisher, @@ -215,6 +297,13 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS outcome = abortController.signal.aborted ? RequestTraceV1Outcome.cancelled : RequestTraceV1Outcome.error + if (outcome === RequestTraceV1Outcome.cancelled) { + // Error-path cancel: typically the stream raised before + // it saw the abort signal. Same classification rules as + // the happy path — we just also thread the error's + // message onto the event for triage. + recordCancelled(error instanceof Error ? error.message : String(error)) + } if (publisher.clientDisconnected) { logger.info(`[${requestId}] Stream errored after client disconnect`, { error: error instanceof Error ? error.message : 'Stream error', @@ -240,10 +329,10 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS collector.endSpan( requestSpan, outcome === RequestTraceV1Outcome.success - ? 'ok' + ? RequestTraceV1SpanStatus.ok : outcome === RequestTraceV1Outcome.cancelled - ? 'cancelled' - : 'error' + ? 
RequestTraceV1SpanStatus.cancelled + : RequestTraceV1SpanStatus.error ) clearInterval(abortPoller) diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index 41c4a5dd42d..b32ff46be44 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -10,10 +10,11 @@ import { TraceFlags, trace, } from '@opentelemetry/api' -import type { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' +import { CopilotBranchKind, CopilotSurface } from '@/lib/copilot/generated/trace-attribute-values-v1' /** * OTel GenAI experimental semantic conventions env var. When set to a @@ -521,14 +522,17 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { const finish: CopilotOtelRoot['finish'] = (outcome, error) => { if (finished) return finished = true - span.setAttribute(TraceAttr.CopilotRequestOutcome, outcome ?? 'success') + span.setAttribute( + TraceAttr.CopilotRequestOutcome, + outcome ?? RequestTraceV1Outcome.success + ) if (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), }) span.recordException(error instanceof Error ? error : new Error(String(error))) - } else if (outcome === 'success') { + } else if (outcome === RequestTraceV1Outcome.success) { span.setStatus({ code: SpanStatusCode.OK }) } span.end() @@ -558,7 +562,7 @@ function applyRequestShape(span: Span, shape: CopilotOtelRequestShape): void { span.setAttribute(TraceAttr.CopilotBranchKind, shape.branchKind) span.setAttribute( TraceAttr.CopilotSurface, - shape.branchKind === 'workflow' ? 
'copilot' : 'mothership' + shape.branchKind === CopilotBranchKind.Workflow ? CopilotSurface.Copilot : CopilotSurface.Mothership ) } if (shape.mode) span.setAttribute(TraceAttr.CopilotMode, shape.mode) diff --git a/apps/sim/lib/copilot/request/session/abort.ts b/apps/sim/lib/copilot/request/session/abort.ts index bbd1cd19695..b5f8ec6b04b 100644 --- a/apps/sim/lib/copilot/request/session/abort.ts +++ b/apps/sim/lib/copilot/request/session/abort.ts @@ -4,6 +4,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis' import { clearAbortMarker, hasAbortMarker, writeAbortMarker } from './buffer' +import { AbortBackend } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('SessionAbort') @@ -136,7 +137,10 @@ export async function acquirePendingChatStream( }, async (span) => { const redis = getRedisClient() - span.setAttribute(TraceAttr.LockBackend, redis ? 'redis' : 'in_process') + span.setAttribute( + TraceAttr.LockBackend, + redis ? AbortBackend.Redis : AbortBackend.InProcess + ) if (redis) { const deadline = Date.now() + timeoutMs for (;;) { @@ -202,14 +206,78 @@ export async function acquirePendingChatStream( /** * Returns `true` if it aborted an in-process controller, * `false` if it only wrote the marker (no local controller found). + * + * Spanned because the two operations inside can stall independently + * — Redis latency on `writeAbortMarker` was previously invisible, and + * the "no local controller" branch (happens when the stream handler + * is on a different Sim box than the one receiving /chat/abort) is + * a subtle but important outcome to distinguish from "aborted a live + * controller" in dashboards. 
*/ export async function abortActiveStream(streamId: string): Promise<boolean> { - await writeAbortMarker(streamId) - const controller = activeStreams.get(streamId) - if (!controller) return false - controller.abort('user_stop:abortActiveStream') - activeStreams.delete(streamId) - return true + return withCopilotSpan( + TraceSpan.CopilotChatAbortActiveStream, + { [TraceAttr.StreamId]: streamId }, + async (span) => { + await writeAbortMarker(streamId) + span.setAttribute(TraceAttr.CopilotAbortMarkerWritten, true) + const controller = activeStreams.get(streamId) + if (!controller) { + span.setAttribute(TraceAttr.CopilotAbortControllerFired, false) + return false + } + controller.abort(AbortReason.UserStop) + activeStreams.delete(streamId) + span.setAttribute(TraceAttr.CopilotAbortControllerFired, true) + return true + } + ) +} + +/** + * Reason strings passed to `AbortController.abort(reason)` for every + * Sim-originated cancel path. Exported so the lifecycle finalizer can + * look at `signal.reason` and distinguish EXPLICIT stops (user hit the + * Stop button) from client disconnects (tab closed, network dropped) + * without guessing. + * + * Why this matters: when the user clicks Stop, we fire + * `abortController.abort(AbortReason.UserStop)` from + * `abortActiveStream()`. That causes Sim's SSE writer to close, which + * in turn makes the BROWSER's SSE reader see the stream end — which + * fires the browser-side fetch AbortController and propagates back to + * Sim as `publisher.markDisconnected()`. So on an explicit Stop you + * observe BOTH "explicit reason" AND "client disconnected" — the + * discriminator is the reason string, not the client flag. + * + * For any NEW abort path, add its reason here and in the + * `isExplicitStopReason` helper so classification stays correct. + */ +export const AbortReason = { + /** Same-process stop: browser→Sim→abortActiveStream.
*/ + UserStop: 'user_stop:abortActiveStream', + /** + * Cross-process stop: the SIM node that held the SSE didn't receive + * the Stop HTTP call, but it polled the Redis abort marker that the + * node that DID receive it wrote, and aborts on the poll. + */ + RedisPoller: 'redis_abort_marker:poller', + /** Internal timeout on the outbound explicit-abort fetch to Go. */ + ExplicitAbortFetchTimeout: 'timeout:go_explicit_abort_fetch', +} as const + +export type AbortReasonValue = (typeof AbortReason)[keyof typeof AbortReason] + +/** + * True iff `reason` indicates the user explicitly triggered the abort + * (as opposed to an implicit client disconnect or server timeout). + * Treated as a small closed vocabulary — any string not in + * `AbortReason` is presumed non-explicit. + */ +export function isExplicitStopReason(reason: unknown): boolean { + return ( + reason === AbortReason.UserStop || reason === AbortReason.RedisPoller + ) } const pollingStreams = new Set() @@ -230,7 +298,7 @@ export function startAbortPoller( try { const shouldAbort = await hasAbortMarker(streamId) if (shouldAbort && !abortController.signal.aborted) { - abortController.abort('redis_abort_marker:poller') + abortController.abort(AbortReason.RedisPoller) await clearAbortMarker(streamId) } } catch (error) { diff --git a/apps/sim/lib/copilot/request/session/explicit-abort.ts b/apps/sim/lib/copilot/request/session/explicit-abort.ts index c5356050307..223fff9266d 100644 --- a/apps/sim/lib/copilot/request/session/explicit-abort.ts +++ b/apps/sim/lib/copilot/request/session/explicit-abort.ts @@ -2,6 +2,7 @@ import type { Context } from '@opentelemetry/api' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' +import { AbortReason } from '@/lib/copilot/request/session/abort' import { env } from '@/lib/core/config/env' export const DEFAULT_EXPLICIT_ABORT_TIMEOUT_MS = 3000 
@@ -29,7 +30,10 @@ export async function requestExplicitStreamAbort(params: { } const controller = new AbortController() - const timeout = setTimeout(() => controller.abort('timeout:go_explicit_abort_fetch'), timeoutMs) + const timeout = setTimeout( + () => controller.abort(AbortReason.ExplicitAbortFetchTimeout), + timeoutMs + ) try { const response = await fetchGo(`${SIM_AGENT_API_URL}/api/streams/explicit-abort`, { diff --git a/apps/sim/lib/copilot/request/session/index.ts b/apps/sim/lib/copilot/request/session/index.ts index 70466d11b7a..b9a4caf44aa 100644 --- a/apps/sim/lib/copilot/request/session/index.ts +++ b/apps/sim/lib/copilot/request/session/index.ts @@ -1,8 +1,11 @@ export { abortActiveStream, + AbortReason, + type AbortReasonValue, acquirePendingChatStream, cleanupAbortMarker, getPendingChatStreamId, + isExplicitStopReason, registerActiveStream, releasePendingChatStream, startAbortPoller, diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 3dcc3b341f1..7c0e4df6085 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -8,6 +8,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import { getLatestSeq, getOldestSeq, readEvents } from './buffer' import { createEvent } from './event' +import { CopilotRecoveryOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('SessionRecovery') @@ -56,7 +57,7 @@ export async function checkForReplayGap( oldestAvailableSeq: oldestSeq, latestSeq, }) - span.setAttribute(TraceAttr.CopilotRecoveryOutcome, 'gap_detected') + span.setAttribute(TraceAttr.CopilotRecoveryOutcome, CopilotRecoveryOutcome.GapDetected) const gapEnvelope = createEvent({ streamId, @@ -92,7 +93,7 @@ export async function checkForReplayGap( } } - span.setAttribute(TraceAttr.CopilotRecoveryOutcome, 'in_range') 
+ span.setAttribute(TraceAttr.CopilotRecoveryOutcome, CopilotRecoveryOutcome.InRange) return null } ) diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 867c4e1fe55..061a69c862d 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -5,6 +5,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' +import { CopilotOutputFileOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotToolResultFiles') @@ -186,7 +187,7 @@ export async function maybeWriteOutputToFile( ) span.setAttributes({ [TraceAttr.CopilotOutputFileId]: uploaded.id, - [TraceAttr.CopilotOutputFileOutcome]: 'uploaded', + [TraceAttr.CopilotOutputFileOutcome]: CopilotOutputFileOutcome.Uploaded, }) logger.info('Tool output written to file', { @@ -214,7 +215,7 @@ export async function maybeWriteOutputToFile( outputPath, error: message, }) - span.setAttribute(TraceAttr.CopilotOutputFileOutcome, 'failed') + span.setAttribute(TraceAttr.CopilotOutputFileOutcome, CopilotOutputFileOutcome.Failed) span.addEvent('copilot.output_file.error', { 'error.message': message.slice(0, 500), }) diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index a012a4ddda8..5c3039af608 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -9,7 +9,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { getTableById } from '@/lib/table/service' - +import { 
CopilotTableOutcome, CopilotTableSourceFormat } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotToolResultTables') const MAX_OUTPUT_TABLE_ROWS = 10_000 @@ -39,7 +39,7 @@ export async function maybeWriteOutputToTable( try { const table = await getTableById(outputTable) if (!table) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'table_not_found') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.TableNotFound) return { success: false, error: `Table "${outputTable}" not found`, @@ -54,7 +54,7 @@ export async function maybeWriteOutputToTable( if (Array.isArray(inner)) { rows = inner } else { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.InvalidShape) return { success: false, error: 'outputTable requires the code to return an array of objects', @@ -63,7 +63,7 @@ export async function maybeWriteOutputToTable( } else if (Array.isArray(rawOutput)) { rows = rawOutput } else { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.InvalidShape) return { success: false, error: 'outputTable requires the code to return an array of objects', @@ -73,7 +73,7 @@ export async function maybeWriteOutputToTable( span.setAttribute(TraceAttr.CopilotTableRowCount, rows.length) if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'row_limit_exceeded') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.RowLimitExceeded) return { success: false, error: `outputTable row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, @@ -81,7 +81,7 @@ export async function maybeWriteOutputToTable( } if (rows.length === 0) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_rows') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.EmptyRows) return { 
success: false, error: 'outputTable requires at least one row — code returned an empty array', @@ -122,7 +122,7 @@ export async function maybeWriteOutputToTable( tableId: outputTable, rowCount: rows.length, }) - span.setAttribute(TraceAttr.CopilotTableOutcome, 'wrote') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Wrote) return { success: true, output: { @@ -137,7 +137,7 @@ export async function maybeWriteOutputToTable( outputTable, error: err instanceof Error ? err.message : String(err), }) - span.setAttribute(TraceAttr.CopilotTableOutcome, 'failed') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Failed) span.addEvent('copilot.table.error', { 'error.message': (err instanceof Error ? err.message : String(err)).slice(0, 500), }) @@ -174,14 +174,14 @@ export async function maybeWriteReadCsvToTable( try { const table = await getTableById(outputTable) if (!table) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'table_not_found') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.TableNotFound) return { success: false, error: `Table "${outputTable}" not found` } } const output = result.output as Record const content = (output.content as string) || '' if (!content.trim()) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_content') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.EmptyContent) return { success: false, error: 'File has no content to import into table' } } @@ -198,7 +198,7 @@ export async function maybeWriteReadCsvToTable( if (ext === 'json') { const parsed = JSON.parse(content) if (!Array.isArray(parsed)) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'invalid_json_shape') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.InvalidJsonShape) return { success: false, error: 'JSON file must contain an array of objects for table import', @@ -220,12 +220,12 @@ export async function maybeWriteReadCsvToTable( 
span.setAttribute(TraceAttr.CopilotTableRowCount, rows.length) if (rows.length === 0) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'empty_rows') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.EmptyRows) return { success: false, error: 'File has no data rows to import' } } if (rows.length > MAX_OUTPUT_TABLE_ROWS) { - span.setAttribute(TraceAttr.CopilotTableOutcome, 'row_limit_exceeded') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.RowLimitExceeded) return { success: false, error: `Row limit exceeded: got ${rows.length}, max is ${MAX_OUTPUT_TABLE_ROWS}`, @@ -268,7 +268,7 @@ export async function maybeWriteReadCsvToTable( rowCount: rows.length, filePath, }) - span.setAttribute(TraceAttr.CopilotTableOutcome, 'imported') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Imported) return { success: true, output: { @@ -284,7 +284,7 @@ export async function maybeWriteReadCsvToTable( outputTable, error: err instanceof Error ? err.message : String(err), }) - span.setAttribute(TraceAttr.CopilotTableOutcome, 'failed') + span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Failed) span.addEvent('copilot.table.error', { 'error.message': (err instanceof Error ? 
err.message : String(err)).slice(0, 500), }) diff --git a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts index 860dc7f0184..7c741072826 100644 --- a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts +++ b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import type { AsyncCompletionData } from '@/lib/copilot/async-runs/lifecycle' import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants' +import { traceparentHeader } from '@/lib/copilot/tools/client/trace-context' import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1' import { RunBlock, @@ -465,7 +466,10 @@ async function reportCompletion( }) const res = await fetch(COPILOT_CONFIRM_API_PATH, { method: 'POST', - headers: { 'Content-Type': 'application/json' }, + // Propagate the chat's root traceparent so the confirm handler + // becomes a child span of the original request's trace. See + // `trace-context.ts` for why this lives in a module singleton. + headers: { 'Content-Type': 'application/json', ...traceparentHeader() }, body, }) const LARGE_PAYLOAD_THRESHOLD = 10 * 1024 * 1024 @@ -479,7 +483,7 @@ async function reportCompletion( }) const retryRes = await fetch(COPILOT_CONFIRM_API_PATH, { method: 'POST', - headers: { 'Content-Type': 'application/json' }, + headers: { 'Content-Type': 'application/json', ...traceparentHeader() }, body: JSON.stringify({ toolCallId, status, diff --git a/apps/sim/lib/copilot/tools/client/trace-context.ts b/apps/sim/lib/copilot/tools/client/trace-context.ts new file mode 100644 index 00000000000..c87c6ebff8d --- /dev/null +++ b/apps/sim/lib/copilot/tools/client/trace-context.ts @@ -0,0 +1,58 @@ +/** + * Browser-side mutable holder for the W3C `traceparent` of the + * current copilot chat stream. 
+ * + * Why this exists as a module-level singleton rather than React + * state / ref: the client-tool-execution code path fires off HTTP + * callbacks (`/api/copilot/confirm`) from arbitrary depth inside + * tool runners that aren't children of any React component tree — + * some are triggered from workflow-runtime callbacks, iframed + * editors, or generic promise chains. Threading a trace id through + * those layers would require changing a dozen function signatures + * across packages we don't control. + * + * A module-level holder works because the browser only ever has one + * active copilot chat at a time (the UI gates sending a new one on + * the stop-barrier). The chat-session hook writes this on the first + * chat POST response and nulls it out when the stream terminates, + * so client tool callbacks emitted during that window can read the + * right value without plumbing. + * + * Not an `export const obj`; using getters/setters so callers can't + * accidentally mutate the backing field (e.g. a stale ref held from + * before a new chat started). Keep this module tiny — it has one + * job. + */ + +let currentTraceparent: string | undefined + +/** + * Set the traceparent for the current chat stream. Called by the + * chat-session hook after receiving the `traceparent` response + * header from the initial chat POST. Pass `undefined` to clear it + * when the stream terminates or a new chat begins. + */ +export function setCurrentChatTraceparent(value: string | undefined): void { + currentTraceparent = value +} + +/** + * Read the traceparent for the currently-active chat. Returns + * `undefined` if no chat is in-flight — callers should fall through + * without a traceparent header in that case, NOT block or throw. + */ +export function getCurrentChatTraceparent(): string | undefined { + return currentTraceparent +} + +/** + * Convenience: header spread suitable for inclusion in `fetch` init + * objects. 
Returns `{}` when no traceparent is set so the spread is + * safe to use unconditionally: + * + * await fetch(url, { headers: { ...traceparentHeader(), ... } }) + */ +export function traceparentHeader(): Record<string, string> { + const tp = currentTraceparent + return tp ? { traceparent: tp } : {} +} diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index 9550ee79771..9f710151dab 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -5,7 +5,7 @@ import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { downloadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { isImageFileType } from '@/lib/uploads/utils/file-utils' - +import { CopilotVfsOutcome, CopilotVfsReadOutcome, CopilotVfsReadPath } from '@/lib/copilot/generated/trace-attribute-values-v1' /** * Lazy tracer (see lib/copilot/request/otel.ts for the same pattern and * why we resolve on every call).
@@ -144,7 +144,7 @@ async function prepareImageForVision( if (!needsResize) { span.setAttributes({ [TraceAttr.CopilotVfsResized]: false, - [TraceAttr.CopilotVfsOutcome]: 'passthrough_fits_budget', + [TraceAttr.CopilotVfsOutcome]: CopilotVfsOutcome.PassthroughFitsBudget, [TraceAttr.CopilotVfsOutputBytes]: buffer.length, [TraceAttr.CopilotVfsOutputMediaType]: mediaType, }) @@ -210,7 +210,7 @@ async function prepareImageForVision( [TraceAttr.CopilotVfsResizeChosenQuality]: quality, [TraceAttr.CopilotVfsOutputBytes]: transformed.buffer.length, [TraceAttr.CopilotVfsOutputMediaType]: transformed.mediaType, - [TraceAttr.CopilotVfsOutcome]: 'resized', + [TraceAttr.CopilotVfsOutcome]: CopilotVfsOutcome.Resized, }) return { buffer: transformed.buffer, @@ -237,7 +237,7 @@ async function prepareImageForVision( span.setAttributes({ [TraceAttr.CopilotVfsResized]: false, [TraceAttr.CopilotVfsResizeAttempts]: attempts, - [TraceAttr.CopilotVfsOutcome]: 'rejected_too_large_after_resize', + [TraceAttr.CopilotVfsOutcome]: CopilotVfsOutcome.RejectedTooLargeAfterResize, }) return null } catch (err) { @@ -287,11 +287,11 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise { try { if (isImageFileType(record.type)) { - span.setAttribute(TraceAttr.CopilotVfsReadPath, 'image') + span.setAttribute(TraceAttr.CopilotVfsReadPath, CopilotVfsReadPath.Image) const originalBuffer = await downloadWorkspaceFile(record) const prepared = await prepareImageForVision(originalBuffer, record.type) if (!prepared) { - span.setAttribute(TraceAttr.CopilotVfsReadOutcome, 'image_too_large') + span.setAttribute(TraceAttr.CopilotVfsReadOutcome, CopilotVfsReadOutcome.ImageTooLarge) return { content: `[Image too large: ${record.name} (${(record.size / 1024 / 1024).toFixed(1)}MB, limit 5MB after resize/compression)]`, totalLines: 1, @@ -300,7 +300,7 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise MAX_TEXT_READ_BYTES) { - 
span.setAttribute(TraceAttr.CopilotVfsReadOutcome, 'text_too_large') + span.setAttribute(TraceAttr.CopilotVfsReadOutcome, CopilotVfsReadOutcome.TextTooLarge) return { content: `[File too large to display inline: ${record.name} (${record.size} bytes, limit ${MAX_TEXT_READ_BYTES})]`, totalLines: 1, @@ -333,7 +333,7 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise): ExtractedEnum[] { + const defs = (schema.$defs ?? {}) as Record + const out: ExtractedEnum[] = [] + for (const [name, def] of Object.entries(defs)) { + if (!def || typeof def !== 'object') continue + const enumValues = (def as Record).enum + if (!Array.isArray(enumValues)) continue + if (!enumValues.every((v) => typeof v === 'string')) continue + out.push({ name, values: (enumValues as string[]).slice().sort() }) + } + out.sort((a, b) => a.name.localeCompare(b.name)) + return out +} + +/** + * PascalCase identifier for a wire enum value. Mirrors the algorithm + * used by the span-names + attribute-keys scripts, so + * `explicit_stop` -> `ExplicitStop`, matching what a reader would + * guess from Go's exported constants. 
+ */ +function toValueIdent(value: string): string { + const parts = value.split(/[^A-Za-z0-9]+/).filter(Boolean) + if (parts.length === 0) { + throw new Error(`Cannot derive identifier for enum value: ${value}`) + } + const ident = parts + .map((p) => p.charAt(0).toUpperCase() + p.slice(1).toLowerCase()) + .join('') + if (/^[0-9]/.test(ident)) { + throw new Error( + `Derived identifier "${ident}" for value "${value}" starts with a digit`, + ) + } + return ident +} + +function renderEnum(e: ExtractedEnum): string { + const seen = new Map() + const lines = e.values.map((v) => { + const ident = toValueIdent(v) + const prev = seen.get(ident) + if (prev && prev !== v) { + throw new Error( + `Enum ${e.name}: identifier collision — "${prev}" and "${v}" both map to "${ident}"`, + ) + } + seen.set(ident, v) + return ` ${ident}: ${JSON.stringify(v)},` + }) + + return `export const ${e.name} = { +${lines.join('\n')} +} as const; + +export type ${e.name}Key = keyof typeof ${e.name}; +export type ${e.name}Value = (typeof ${e.name})[${e.name}Key];` +} + +function render(enums: ExtractedEnum[]): string { + const body = enums.map(renderEnum).join('\n\n') + return `// AUTO-GENERATED FILE. DO NOT EDIT. +// +// Source: copilot/copilot/contracts/trace-attribute-values-v1.schema.json +// Regenerate with: bun run trace-attribute-values-contract:generate +// +// Canonical closed-set value vocabularies for mothership OTel +// attributes. Call sites should reference e.g. +// \`CopilotRequestCancelReason.ExplicitStop\` rather than the raw +// string literal, so typos become compile errors and the Go contract +// remains the single source of truth. + +${body} +` +} + +async function main() { + const checkOnly = process.argv.includes('--check') + const inputArg = process.argv.find((a) => a.startsWith('--input=')) + const inputPath = inputArg + ? 
resolve(ROOT, inputArg.slice('--input='.length)) + : DEFAULT_CONTRACT_PATH + + const raw = await readFile(inputPath, 'utf8') + const schema = JSON.parse(raw) + const enums = extractEnums(schema) + if (enums.length === 0) { + throw new Error( + 'No enum $defs found in trace-attribute-values-v1.schema.json — did you add the Go type to TraceAttributeValuesV1AllDefs?', + ) + } + const rendered = render(enums) + + if (checkOnly) { + const existing = await readFile(OUTPUT_PATH, 'utf8').catch(() => null) + if (existing !== rendered) { + throw new Error( + 'Generated trace attribute values contract is stale. Run: bun run trace-attribute-values-contract:generate', + ) + } + console.log('Trace attribute values contract is up to date.') + return + } + + await mkdir(dirname(OUTPUT_PATH), { recursive: true }) + await writeFile(OUTPUT_PATH, rendered, 'utf8') + console.log(`Generated trace attribute values types -> ${OUTPUT_PATH}`) +} + +main().catch((err) => { + console.error(err) + process.exit(1) +}) diff --git a/scripts/sync-trace-events-contract.ts b/scripts/sync-trace-events-contract.ts new file mode 100644 index 00000000000..7e858f4e2a6 --- /dev/null +++ b/scripts/sync-trace-events-contract.ts @@ -0,0 +1,137 @@ +import { mkdir, readFile, writeFile } from 'node:fs/promises' +import { dirname, resolve } from 'node:path' +import { fileURLToPath } from 'node:url' + +/** + * Generate `apps/sim/lib/copilot/generated/trace-events-v1.ts` from + * the Go-side `contracts/trace-events-v1.schema.json` contract. + * + * Mirrors the span-names + attribute-keys sync scripts exactly — the + * only difference is the $defs key (`TraceEventsV1Name`), the output + * path, and the generated const name (`TraceEvent`). Keeping the + * scripts structurally identical means a reader who understands one + * understands all three, and drift between them gets caught + * immediately in code review. 
+ */
+const SCRIPT_DIR = dirname(fileURLToPath(import.meta.url))
+const ROOT = resolve(SCRIPT_DIR, '..')
+const DEFAULT_CONTRACT_PATH = resolve(
+  ROOT,
+  '../copilot/copilot/contracts/trace-events-v1.schema.json',
+)
+const OUTPUT_PATH = resolve(
+  ROOT,
+  'apps/sim/lib/copilot/generated/trace-events-v1.ts',
+)
+
+function extractEventNames(schema: Record<string, unknown>): string[] {
+  const defs = (schema.$defs ?? {}) as Record<string, unknown>
+  const nameDef = defs.TraceEventsV1Name
+  if (
+    !nameDef ||
+    typeof nameDef !== 'object' ||
+    !Array.isArray((nameDef as Record<string, unknown>).enum)
+  ) {
+    throw new Error(
+      'trace-events-v1.schema.json is missing $defs.TraceEventsV1Name.enum',
+    )
+  }
+  const enumValues = (nameDef as Record<string, unknown>).enum as unknown[]
+  if (!enumValues.every((v) => typeof v === 'string')) {
+    throw new Error('TraceEventsV1Name enum must be string-only')
+  }
+  return (enumValues as string[]).slice().sort()
+}
+
+function toIdentifier(name: string): string {
+  const parts = name.split(/[^A-Za-z0-9]+/).filter(Boolean)
+  if (parts.length === 0) {
+    throw new Error(`Cannot derive identifier for event name: ${name}`)
+  }
+  const ident = parts
+    .map((p) => p.charAt(0).toUpperCase() + p.slice(1).toLowerCase())
+    .join('')
+  if (/^[0-9]/.test(ident)) {
+    throw new Error(
+      `Derived identifier "${ident}" for event "${name}" starts with a digit`,
+    )
+  }
+  return ident
+}
+
+function render(eventNames: string[]): string {
+  const pairs = eventNames.map((name) => ({ name, ident: toIdentifier(name) }))
+
+  const seen = new Map<string, string>()
+  for (const p of pairs) {
+    const prev = seen.get(p.ident)
+    if (prev && prev !== p.name) {
+      throw new Error(
+        `Identifier collision: "${prev}" and "${p.name}" both map to "${p.ident}"`,
+      )
+    }
+    seen.set(p.ident, p.name)
+  }
+
+  const constLines = pairs
+    .map((p) => `  ${p.ident}: ${JSON.stringify(p.name)},`)
+    .join('\n')
+  const arrayEntries = eventNames.map((n) => `  ${JSON.stringify(n)},`).join('\n')
+
+  return `// AUTO-GENERATED FILE. DO NOT EDIT.
+// +// Source: copilot/copilot/contracts/trace-events-v1.schema.json +// Regenerate with: bun run trace-events-contract:generate +// +// Canonical mothership OTel span event names. Call sites should +// reference \`TraceEvent.\` (e.g. +// \`TraceEvent.RequestCancelled\`) rather than raw string literals, +// so the Go-side contract is the single source of truth and typos +// become compile errors. + +export const TraceEvent = { +${constLines} +} as const; + +export type TraceEventKey = keyof typeof TraceEvent; +export type TraceEventValue = (typeof TraceEvent)[TraceEventKey]; + +/** Readonly sorted list of every canonical event name. */ +export const TraceEventValues: readonly TraceEventValue[] = [ +${arrayEntries} +] as const; +` +} + +async function main() { + const checkOnly = process.argv.includes('--check') + const inputArg = process.argv.find((a) => a.startsWith('--input=')) + const inputPath = inputArg + ? resolve(ROOT, inputArg.slice('--input='.length)) + : DEFAULT_CONTRACT_PATH + + const raw = await readFile(inputPath, 'utf8') + const schema = JSON.parse(raw) + const eventNames = extractEventNames(schema) + const rendered = render(eventNames) + + if (checkOnly) { + const existing = await readFile(OUTPUT_PATH, 'utf8').catch(() => null) + if (existing !== rendered) { + throw new Error( + 'Generated trace events contract is stale. 
Run: bun run trace-events-contract:generate', + ) + } + console.log('Trace events contract is up to date.') + return + } + + await mkdir(dirname(OUTPUT_PATH), { recursive: true }) + await writeFile(OUTPUT_PATH, rendered, 'utf8') + console.log(`Generated trace events types -> ${OUTPUT_PATH}`) +} + +main().catch((err) => { + console.error(err) + process.exit(1) +}) From 792459c03bc93b73bbe3356ededdbf771bbb2559 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 18 Apr 2026 13:16:49 -0700 Subject: [PATCH 08/10] Update --- apps/sim/app/api/copilot/chat/abort/route.ts | 40 +++++- apps/sim/app/api/copilot/chat/stop/route.ts | 20 ++- .../message-actions/message-actions.tsx | 101 ++++++++++---- .../mothership-chat/mothership-chat.tsx | 30 ++-- .../[workspaceId]/home/hooks/use-chat.ts | 132 +++++++++++++++--- apps/sim/lib/copilot/async-runs/repository.ts | 65 +++++---- apps/sim/lib/copilot/chat/post.ts | 21 ++- .../generated/trace-attribute-values-v1.ts | 1 + apps/sim/lib/copilot/request/go/fetch.ts | 29 +++- .../lib/copilot/request/lifecycle/finalize.ts | 31 +++- apps/sim/lib/copilot/request/lifecycle/run.ts | 25 +++- .../lib/copilot/request/lifecycle/start.ts | 72 +++++++--- apps/sim/lib/copilot/request/otel.ts | 112 ++++++++++----- apps/sim/lib/copilot/request/session/abort.ts | 6 +- .../copilot/request/session/explicit-abort.ts | 2 +- apps/sim/lib/copilot/request/types.ts | 13 ++ 16 files changed, 538 insertions(+), 162 deletions(-) diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts b/apps/sim/app/api/copilot/chat/abort/route.ts index 7fd8e2493a3..aa41c5bc940 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -71,6 +71,39 @@ export async function POST(request: Request) { } if (chatId) rootSpan.setAttribute(TraceAttr.ChatId, chatId) + // ORDER MATTERS: local abort FIRST, Go explicit-abort SECOND. 
+ // + // Sim and Go each own a separate Redis instance and do not share + // state through it — the only signal that crosses the service + // boundary is this HTTP call. So the race to win is purely + // Sim-internal: + // + // - `abortActiveStream` flips the AbortController (reason = + // AbortReason.UserStop) that's wrapped around the in-flight + // `fetchGo('/api/mothership', ...)` SSE stream. Once flipped, + // the stream throws AbortError on the next chunk read, and + // the lifecycle catch block's classifier sees + // `signal.aborted = true` with an explicit-stop reason → the + // root span gets stamped `cancel_reason = explicit_stop` and + // the `request.cancelled` event fires correctly. + // + // - If we call Go first (old order), Go's context cancels from + // its own explicit-abort handler, the /api/mothership stream + // errors with "context canceled", and Sim's catch block fires + // BEFORE we've flipped the local AbortController. At that + // point `signal.aborted` is still false, so the classifier + // falls through to `client_disconnect` / `unknown` and the + // root ends up as `outcome = error` — which is what we saw + // in trace 25f31730082078cef54653b1740caf12. + // + // Go's explicit-abort endpoint still runs second: it's what tells + // Go-side billing "this was intentional, flush the paused ledger" + // and is unaffected by the reorder (Go's context is already + // cancelled by the time we get there; the endpoint's job is + // billing semantics, not cancelling in-flight work). + const aborted = await abortActiveStream(streamId) + rootSpan.setAttribute(TraceAttr.CopilotAbortLocalAborted, aborted) + let goAbortOk = false try { const headers: Record = { 'Content-Type': 'application/json' } @@ -94,7 +127,7 @@ export async function POST(request: Request) { spanName: 'sim → go /api/streams/explicit-abort', operation: 'explicit_abort', attributes: { - 'copilot.stream.id': streamId, + [TraceAttr.StreamId]: streamId, ...(chatId ? 
{ [TraceAttr.ChatId]: chatId } : {}), }, }).finally(() => clearTimeout(timeout)) @@ -103,16 +136,13 @@ export async function POST(request: Request) { } goAbortOk = true } catch (err) { - logger.warn('Explicit abort marker request failed; proceeding with local abort', { + logger.warn('Explicit abort marker request failed after local abort', { streamId, error: err instanceof Error ? err.message : String(err), }) } rootSpan.setAttribute(TraceAttr.CopilotAbortGoMarkerOk, goAbortOk) - const aborted = await abortActiveStream(streamId) - rootSpan.setAttribute(TraceAttr.CopilotAbortLocalAborted, aborted) - if (chatId) { // `waitForPendingChatStream` blocks up to 8s waiting for the // prior stream's release. It's THE single most likely stall diff --git a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index a0a0cb68570..569b9487bb2 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -58,6 +58,14 @@ const StopSchema = z.object({ streamId: z.string(), content: z.string(), contentBlocks: z.array(ContentBlockSchema).optional(), + /** + * Optional because older clients may not send it, but strongly + * recommended: without it, the stopped assistant message persisted + * below loses its `requestId`, which breaks the "Copy request ID" + * button in the UI (it's the only handle the user has for filing + * bug reports about a hung / bad turn). 
+ */ + requestId: z.string().optional(), }) /** @@ -84,13 +92,16 @@ export async function POST(req: NextRequest) { return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) } - const { chatId, streamId, content, contentBlocks } = StopSchema.parse(await req.json()) + const { chatId, streamId, content, contentBlocks, requestId } = StopSchema.parse( + await req.json() + ) span.setAttributes({ [TraceAttr.ChatId]: chatId, [TraceAttr.StreamId]: streamId, [TraceAttr.UserId]: session.user.id, [TraceAttr.CopilotStopContentLength]: content.length, [TraceAttr.CopilotStopBlocksCount]: contentBlocks?.length ?? 0, + ...(requestId ? { [TraceAttr.RequestId]: requestId } : {}), }) const [row] = await db @@ -141,6 +152,13 @@ export async function POST(req: NextRequest) { content, timestamp: new Date().toISOString(), contentBlocks: synthesizedStoppedBlocks, + // Preserve the requestId onto the persisted aborted message + // so the UI's "Copy request ID" button keeps working after + // the chat history refetches and replaces the in-memory + // streaming message with this persisted version. Without + // this, the button blinks out ~1-2s after the user hits + // Stop because the refetched message has no requestId. + ...(requestId ? 
{ requestId } : {}), }) const assistantMessage: PersistedMessage = normalized setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb` diff --git a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx index 67c7dd92c8f..322b0e98bf9 100644 --- a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx +++ b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx @@ -138,35 +138,86 @@ export function MessageActions({ content, chatId, userQuery, requestId }: Messag } }, []) - if (!content) return null + // Render the action row whenever there's ANYTHING to act on. For a + // normal assistant turn that's `content`; for an aborted / error / + // content-less turn it's the `requestId` alone — users still need to + // be able to grab the trace ID for bug reports in those cases. + const hasContent = Boolean(content) + if (!hasContent && !requestId) return null + + const canSubmitFeedback = Boolean(chatId && userQuery) return ( <>
- - - + {hasContent && ( + + + + + + {copied ? 'Copied message' : 'Copy message'} + + + )} + {canSubmitFeedback && ( + <> + + + + + Good response + + + + + + Bad response + + + )} + {requestId && ( + + + + + + {copiedRequestId ? 'Copied request ID' : 'Copy request ID'} + + + )}
diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx index fb94ac5a759..6868644f1b7 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx @@ -179,16 +179,26 @@ export function MothershipChat({ onOptionSelect={isLastMessage ? onSubmit : undefined} onWorkspaceResourceSelect={onWorkspaceResourceSelect} /> - {!isThisStreaming && (msg.content || msg.contentBlocks?.length) && ( -
- -
- )} + {/* + Render the actions row whenever the assistant turn has + settled (not streaming) AND there's anything to act on. + We intentionally include `requestId` in the trigger so + that aborted or content-less turns still surface the + copy-trace-ID button — dropping the row in those cases + makes it impossible for a user to grab the request ID + needed for bug reports. + */} + {!isThisStreaming && + (msg.content || msg.contentBlocks?.length || msg.requestId) && ( +
+ +
+ )} ) })} diff --git a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts index f1b411942eb..713632bf8a0 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts +++ b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts @@ -1279,6 +1279,15 @@ export function useChat( // child spans of the same trace instead of disconnected roots. // Cleared when a new chat starts (overwritten by the next POST). const streamTraceparentRef = useRef(undefined) + /** + * The `request.id` stamped on the active stream's trace events. Used + * to forward to the server-side Stop route so the persisted aborted + * assistant message keeps its requestId — which the UI needs for the + * "Copy request ID" button (the user's handle for bug reports on bad + * turns). Updated from the `trace` payload on every parsed stream + * event; reset by the caller when a new chat starts. + */ + const streamRequestIdRef = useRef(undefined) const locallyTerminalStreamIdRef = useRef(undefined) const lastCursorRef = useRef('0') const sendingRef = useRef(false) @@ -1317,6 +1326,14 @@ export function useChat( activeTurnRef.current = null pendingUserMsgRef.current = null streamIdRef.current = undefined + streamRequestIdRef.current = undefined + // Drop the previous stream's W3C traceparent so side-channel + // requests fired AFTER a turn ended don't propagate stale trace + // context. Without this the abort/confirm/stop handlers could + // still parent under the finished turn's span; each new chat POST + // overwrites this ref to the fresh turn's traceparent. 
+ streamTraceparentRef.current = undefined + setCurrentChatTraceparent(undefined) lastCursorRef.current = '0' resetStreamingBuffers() }, [resetStreamingBuffers]) @@ -1818,6 +1835,12 @@ export function useChat( readLoop: while (true) { if (pendingLines.length === 0) { + // Once the terminal `complete` event has been processed, + // don't read another chunk — we've drained everything + // that was locally buffered alongside the terminator. + // Any further events would be a server-side bug (emitting + // after `complete`); don't hang waiting for them. + if (sawCompleteEvent) break const { done, value } = await reader.read() if (done) break if (isStale()) continue @@ -1857,6 +1880,10 @@ export function useChat( if (parsed.trace?.requestId && parsed.trace.requestId !== streamRequestId) { streamRequestId = parsed.trace.requestId + // Mirror into a ref so stopGeneration / persistPartialResponse + // can read the latest requestId without being re-created on + // every render (they close over the ref, not a stale value). + streamRequestIdRef.current = streamRequestId flush() } if (parsed.stream?.streamId) { @@ -2473,9 +2500,26 @@ export function useChat( } case MothershipStreamV1EventType.complete: { sawCompleteEvent = true - // `complete` is terminal for this stream, even if the transport takes a moment - // longer to close. - break readLoop + // `complete` is the logical end-of-turn marker, NOT a + // guillotine on the read loop. The server commonly + // flushes a few more events in the SAME TCP chunk as + // the terminal `complete` — trailing text fragments, + // followup-action blocks, or run metadata — so we must + // drain everything that's already sitting in the + // `pendingLines` buffer before stopping. Bailing on + // first sight used to truncate the last sentence of + // the assistant response and drop followups on the + // floor, even though the server had persisted them + // correctly (trace 677af168, request + // 06ff631a-4d72-4862-ac5c-9dbdd0c380c2). 
+ // + // We still avoid another `reader.read()` — events that + // arrive in a SUBSEQUENT chunk after `complete` would + // be a server bug; don't wait for them. Draining only + // what's already locally buffered strikes the balance: + // no lost events from the terminal chunk, no hang on + // a misbehaving transport. + continue } } } @@ -2863,12 +2907,20 @@ export function useChat( streamId?: string content?: string blocks?: ContentBlock[] + // Caller-supplied snapshot. `stopGeneration` calls + // `clearActiveTurn()` BEFORE firing this, which nulls + // `streamRequestIdRef`; anyone relying on the ref at POST time + // would send `requestId: undefined` and the persisted message + // would lose its trace id handle (Copy-request-id button + // disappears on refetch — repro: trace de69695b). + requestId?: string }) => { const chatId = overrides?.chatId ?? chatIdRef.current const streamId = overrides?.streamId ?? streamIdRef.current if (!chatId || !streamId) return const content = overrides?.content ?? streamingContentRef.current + const requestId = overrides?.requestId ?? streamRequestIdRef.current const sourceBlocks = overrides?.blocks ?? streamingBlocksRef.current const storedBlocks = sourceBlocks.map((block) => { @@ -2912,6 +2964,14 @@ export function useChat( streamId, content, ...(storedBlocks.length > 0 && { contentBlocks: storedBlocks }), + // Forward the active stream's requestId so the server can + // stamp it onto the persisted aborted assistant message — + // keeps the "Copy request ID" button working after the + // in-memory streaming message gets replaced by the persisted + // one on chat history refetch. Pulled from the resolved + // `requestId` (override preferred over ref) because the ref + // may have been cleared by the time this fetch runs. + ...(requestId ? 
{ requestId } : {}), }), }) if (!res.ok) { @@ -2950,9 +3010,36 @@ export function useChat( const messagesRef = useRef(messages) messagesRef.current = messages + /** + * Notify downstream consumers that a turn has ended and, if a + * follow-up message is queued, kick the dispatcher. Safe to call + * from both the normal-completion path (`finalize`) and the + * abort/stop path (`stopGeneration`), which previously short- + * circuited without notifying — queued messages then sat until the + * user manually re-sent. Idempotent w.r.t. `onStreamEnd` (one call + * per terminal transition); the dispatcher itself de-dupes. + */ + const notifyTurnEnded = useCallback( + (options: { error: boolean; skipQueueDispatch?: boolean }) => { + const hasQueuedFollowUp = !options.error && messageQueueRef.current.length > 0 + if (!options.error) { + const cid = chatIdRef.current + if (cid && onStreamEndRef.current) { + onStreamEndRef.current(cid, messagesRef.current) + } + } + if (!options.error && !options.skipQueueDispatch && hasQueuedFollowUp) { + void enqueueQueueDispatchRef.current({ type: 'send_head' }) + } + return hasQueuedFollowUp + }, + [] + ) + const finalize = useCallback( (options?: { error?: boolean }) => { - const hasQueuedFollowUp = !options?.error && messageQueueRef.current.length > 0 + const isError = !!options?.error + const hasQueuedFollowUp = !isError && messageQueueRef.current.length > 0 reconcileTerminalPreviewSessions() locallyTerminalStreamIdRef.current = streamIdRef.current ?? activeTurnRef.current?.userMessageId ?? 
undefined @@ -2960,23 +3047,15 @@ export function useChat( setTransportIdle() abortControllerRef.current = null invalidateChatQueries({ includeDetail: !hasQueuedFollowUp }) - - if (!options?.error) { - const cid = chatIdRef.current - if (cid && onStreamEndRef.current) { - onStreamEndRef.current(cid, messagesRef.current) - } - } - - if (options?.error) { - return - } - - if (hasQueuedFollowUp) { - void enqueueQueueDispatchRef.current({ type: 'send_head' }) - } + notifyTurnEnded({ error: isError }) }, - [clearActiveTurn, invalidateChatQueries, reconcileTerminalPreviewSessions, setTransportIdle] + [ + clearActiveTurn, + invalidateChatQueries, + notifyTurnEnded, + reconcileTerminalPreviewSessions, + setTransportIdle, + ] ) finalizeRef.current = finalize @@ -3445,6 +3524,10 @@ export function useChat( ...(block.options ? { options: [...block.options] } : {}), ...(block.toolCall ? { toolCall: { ...block.toolCall } } : {}), })) + // Snapshot BEFORE clearActiveTurn() nulls the ref. The + // persistPartialResponse fetch runs inside stopBarrier below, + // after several awaits — the ref is long gone by then. + const stopRequestIdSnapshot = streamRequestIdRef.current locallyTerminalStreamIdRef.current = sid streamGenRef.current++ @@ -3532,6 +3615,7 @@ export function useChat( streamId: sid, content: stopContentSnapshot, blocks: stopBlocksSnapshot, + requestId: stopRequestIdSnapshot, }) } @@ -3545,6 +3629,13 @@ export function useChat( pendingStopPromiseRef.current = stopBarrier try { await stopBarrier + // Notify downstream (onStreamEnd) and dispatch any queued + // follow-up message. Without this, a user who queued a message + // during streaming and then hit Stop would see the queued + // message stay queued until they manually re-sent — because + // `stopGeneration` previously short-circuited the whole turn- + // end pipeline. + notifyTurnEnded({ error: false }) } catch (err) { setError(err instanceof Error ? 
err.message : 'Failed to stop the previous response') throw err @@ -3556,6 +3647,7 @@ export function useChat( }, [ cancelActiveWorkflowExecutions, invalidateChatQueries, + notifyTurnEnded, persistPartialResponse, queryClient, resetEphemeralPreviewState, diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index 8a79eae8e2a..ffcabd197cd 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -47,17 +47,29 @@ async function withDbSpan( const result = await fn() return result } catch (error) { + // AbortError / cancellation is a control-flow outcome, not a DB + // failure. Record the exception event but skip `codes.ERROR` so + // the trace doesn't show red spans for every aborted request + // that happened to have an in-flight async-runs query. span.recordException(error instanceof Error ? error : new Error(String(error))) - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) + if (!isAbortError(error)) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }) + } throw error } finally { span.end() } } +function isAbortError(err: unknown): boolean { + if (err == null || typeof err !== 'object') return false + const e = err as { name?: unknown; code?: unknown } + return e.name === 'AbortError' || e.code === 'ABORT_ERR' +} + export interface CreateRunSegmentInput { id?: string executionId: string @@ -168,28 +180,31 @@ export async function getLatestRunForExecution(executionId: string) { ) } +/** + * Deliberately UN-instrumented with OTel spans. 
Called from a 250ms + * poll loop in `app/api/copilot/chat/stream/route.ts` during every + * resume SSE connection — at 4 Hz for the whole lifetime of the + * connection, emitting a span per poll blew up long traces with + * hundreds of noop DB spans (observed ~240 spans/minute during + * reproduction). + * + * If we ever need visibility into this query's latency, add a Prom + * histogram (aggregates cleanly) rather than per-call spans at 4 Hz. + * The raw query is also fired once-off from several non-polling call + * sites; those get accurate DB latency from the request-level + * postgres instrumentation lower down the stack. + */ export async function getLatestRunForStream(streamId: string, userId?: string) { - return withDbSpan( - 'copilot.async_runs.get_latest_for_stream', - 'SELECT', - 'copilot_runs', - { - 'copilot.stream_id': streamId, - 'copilot.user_id': userId, - }, - async () => { - const conditions = userId - ? and(eq(copilotRuns.streamId, streamId), eq(copilotRuns.userId, userId)) - : eq(copilotRuns.streamId, streamId) - const [run] = await db - .select() - .from(copilotRuns) - .where(conditions) - .orderBy(desc(copilotRuns.startedAt)) - .limit(1) - return run ?? null - } - ) + const conditions = userId + ? and(eq(copilotRuns.streamId, streamId), eq(copilotRuns.userId, userId)) + : eq(copilotRuns.streamId, streamId) + const [run] = await db + .select() + .from(copilotRuns) + .where(conditions) + .orderBy(desc(copilotRuns.startedAt)) + .limit(1) + return run ?? null } export async function getRunSegment(runId: string) { diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index c3889e158aa..d8437b5a7d1 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -405,14 +405,29 @@ function buildOnComplete(params: { if (!chatId) return + // One-writer rule on cancel paths: `/api/copilot/chat/stop` is the + // single DB writer when the user hit Stop (or the client + // disconnected). 
It writes the partial assistant message AND + // clears `conversationId` in the same UPDATE, filtered on + // `conversationId = streamId`. If `finalizeAssistantTurn` races + // ahead here and clears `conversationId` first, stop's UPDATE + // matches zero rows and the partial content silently vanishes on + // chat refetch (repro: trace c18de3e2 → `copilot.stop.outcome = + // 'no_matching_row'`). + // + // So: on cancel, skip finalize here and let /chat/stop run the + // terminal write. On real backend errors (`!success` without + // `cancelled`) we DO want to finalize — it clears the stream + // marker so the chat isn't stuck with a non-null `conversationId` + // and blocking future messages. + if (result.cancelled) return + try { await finalizeAssistantTurn({ chatId, userMessageId, ...(result.success - ? { - assistantMessage: buildPersistedAssistantMessage(result, requestId), - } + ? { assistantMessage: buildPersistedAssistantMessage(result, requestId) } : {}), }) diff --git a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts index 19a1f6b0148..29441128eba 100644 --- a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts @@ -294,6 +294,7 @@ export const LlmErrorStage = { HttpStatus: "http_status", Invoke: "invoke", MarshalRequest: "marshal_request", + StreamClose: "stream_close", } as const; export type LlmErrorStageKey = keyof typeof LlmErrorStage; diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index fafcce68f13..7e92583ab58 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -90,17 +90,38 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): return response } catch (error) { span.setAttribute(TraceAttr.HttpResponseHeadersMs, Math.round(performance.now() - start)) - span.setStatus({ - code: 
SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) + // AbortError isn't a real failure — it's the caller (user Stop, + // orchestrator deadline, reader disconnect) asking the fetch to + // stop. Record the exception event so the trace still carries the + // forensic detail, but skip `codes.ERROR` so dashboards don't + // treat every abort as a 5xx-class incident. Mirrors the Go-side + // carve-out for `context.Canceled` in `StreamSpan.RecordError`. span.recordException(error instanceof Error ? error : new Error(String(error))) + if (!isFetchAbortError(error)) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }) + } throw error } finally { span.end() } } +/** + * Matches every fetch-abort shape the Node/browser runtimes produce: + * DOMException `AbortError`, plain `Error` with `name === 'AbortError'`, + * and undici's `code === 'ABORT_ERR'`. Kept local to this module so + * there's no cross-cutting dependency; the canonical version lives in + * `lib/copilot/request/otel.ts` (`isCancellationError`). 
+ */ +function isFetchAbortError(err: unknown): boolean { + if (err == null || typeof err !== 'object') return false + const e = err as { name?: unknown; code?: unknown } + return e.name === 'AbortError' || e.code === 'ABORT_ERR' +} + function safeParseUrl(url: string): URL | null { try { return new URL(url) diff --git a/apps/sim/lib/copilot/request/lifecycle/finalize.ts b/apps/sim/lib/copilot/request/lifecycle/finalize.ts index 229a8ae309b..3d0e5de00cd 100644 --- a/apps/sim/lib/copilot/request/lifecycle/finalize.ts +++ b/apps/sim/lib/copilot/request/lifecycle/finalize.ts @@ -5,6 +5,10 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' +import { + type RequestTraceV1Outcome, + RequestTraceV1Outcome as RequestTraceV1OutcomeConst, +} from '@/lib/copilot/generated/request-trace-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import type { StreamWriter } from '@/lib/copilot/request/session' import type { OrchestratorResult } from '@/lib/copilot/request/types' @@ -15,20 +19,33 @@ const getTracer = () => trace.getTracer('sim-copilot-finalize', '1.0.0') /** * Single finalization path for stream results. - * Handles abort / error / success and publishes the terminal event. - * Replaces duplicated blocks in the old chat-streaming.ts. + * + * `outcome` is the classifier's resolved verdict from the caller — it + * encodes "was this cancelled, errored, or completed" WITHOUT relying + * on the raw `abortController.signal.aborted` boolean. That matters + * because a client can disconnect mid-stream without the abort + * controller ever firing (the SSE `cancel()` callback only sets + * `publisher.clientDisconnected`); the lifecycle classifies THAT as + * `cancelled` too, but a prior API passed `aborted: false` into this + * function, sending us down `handleError` and persisting an `error` + * terminal state + run status. Now the outcome is the source of truth. 
*/ export async function finalizeStream( result: OrchestratorResult, publisher: StreamWriter, runId: string, - aborted: boolean, + outcome: RequestTraceV1Outcome, requestId: string ): Promise { - const outcome = aborted ? 'aborted' : result.success ? 'success' : 'error' + const spanOutcome = + outcome === RequestTraceV1OutcomeConst.cancelled + ? 'aborted' + : outcome === RequestTraceV1OutcomeConst.success + ? 'success' + : 'error' const span = getTracer().startSpan('copilot.finalize_stream', { attributes: { - [TraceAttr.CopilotFinalizeOutcome]: outcome, + [TraceAttr.CopilotFinalizeOutcome]: spanOutcome, 'copilot.run.id': runId, 'copilot.request.id': requestId, [TraceAttr.CopilotResultToolCalls]: result.toolCalls?.length ?? 0, @@ -39,9 +56,9 @@ export async function finalizeStream( }, }) try { - if (aborted) { + if (outcome === RequestTraceV1OutcomeConst.cancelled) { await handleAborted(result, publisher, runId, requestId) - } else if (!result.success) { + } else if (outcome === RequestTraceV1OutcomeConst.error) { span.setStatus({ code: SpanStatusCode.ERROR, message: result.error || 'orchestration failed', diff --git a/apps/sim/lib/copilot/request/lifecycle/run.ts b/apps/sim/lib/copilot/request/lifecycle/run.ts index e15414a03e1..65149c406b1 100644 --- a/apps/sim/lib/copilot/request/lifecycle/run.ts +++ b/apps/sim/lib/copilot/request/lifecycle/run.ts @@ -125,6 +125,15 @@ export async function runCopilotLifecycle( const result: OrchestratorResult = { success: context.errors.length === 0 && !context.wasAborted, + // `cancelled` is an explicit discriminator so callers can tell + // "user hit Stop" (don't clear the chat row; /chat/stop owns it) + // from "backend errored" (do clear the row so the chat isn't + // stuck with a non-null `conversationId`). 
An error that also + // happens to fire the abort signal still counts as an error + // path, but practically that doesn't happen in the success + // branch here — if there are errors we never reach a + // wasAborted-without-errors state. + cancelled: context.wasAborted && context.errors.length === 0, content: context.accumulatedContent, contentBlocks: context.contentBlocks, toolCalls: buildToolCallSummaries(context), @@ -139,9 +148,23 @@ export async function runCopilotLifecycle( } catch (error) { const err = error instanceof Error ? error : new Error('Copilot orchestration failed') logger.error('Copilot orchestration failed', { error: err.message }) - await lifecycleOptions.onError?.(err) + // If the abort signal fired, this throw is a consequence of the + // cancel (publisher.publish fails once the client disconnects, a + // downstream Go read throws on ctx cancel, etc.) — NOT a real + // backend error. Don't invoke `onError`, because on the cancel + // path `/api/copilot/chat/stop` is the single DB writer and + // `onError` would race with it via `finalizeAssistantTurn`, + // clearing `conversationId` before stop's UPDATE can match (see + // `buildOnComplete` in chat/post.ts for the full rationale). + // Return `cancelled: true` so upstream classification stays + // consistent with the success-path cancel result. + const wasCancelled = lifecycleOptions.abortSignal?.aborted ?? 
false + if (!wasCancelled) { + await lifecycleOptions.onError?.(err) + } return { success: false, + cancelled: wasCancelled, content: '', contentBlocks: [], toolCalls: [], diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index a6b4c7a9c0d..a7b73246351 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -278,30 +278,42 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS }) lifecycleResult = result - outcome = abortController.signal.aborted - ? RequestTraceV1Outcome.cancelled - : result.success - ? RequestTraceV1Outcome.success + // Outcome classification (priority order): + // 1. `result.success` → success. The orchestrator + // reporting "finished cleanly" wins over any later + // signal change. Matters for the narrow race where + // the user clicks Stop a beat after the stream + // completed. + // 2. `signal.aborted` (from `abortActiveStream` or the + // Redis-marker poller) OR `clientDisconnected` with + // a non-success result → cancelled. `recordCancelled` + // further refines into explicit_stop / client_disconnect + // / unknown via `signal.reason`. + // 3. Otherwise → error. + outcome = result.success + ? RequestTraceV1Outcome.success + : abortController.signal.aborted || publisher.clientDisconnected + ? RequestTraceV1Outcome.cancelled : RequestTraceV1Outcome.error if (outcome === RequestTraceV1Outcome.cancelled) { recordCancelled() } - await finalizeStream( - result, - publisher, - runId, - abortController.signal.aborted, - requestId - ) + // Pass the resolved outcome — not `signal.aborted` — so + // `finalizeStream` classifies the same way we did above. + // A client-disconnect-without-controller-abort still needs + // to hit `handleAborted` (not `handleError`) so the chat + // row gets `cancelled` terminal state instead of `error`. 
+ await finalizeStream(result, publisher, runId, outcome, requestId) } catch (error) { - outcome = abortController.signal.aborted + // Error-path classification: if the abort signal fired or + // the client disconnected, treat the thrown error as a + // cancel (same rationale as the try-path above). + const wasCancelled = + abortController.signal.aborted || publisher.clientDisconnected + outcome = wasCancelled ? RequestTraceV1Outcome.cancelled : RequestTraceV1Outcome.error if (outcome === RequestTraceV1Outcome.cancelled) { - // Error-path cancel: typically the stream raised before - // it saw the abort signal. Same classification rules as - // the happy path — we just also thread the error's - // message onto the event for triage. recordCancelled(error instanceof Error ? error.message : String(error)) } if (publisher.clientDisconnected) { @@ -318,13 +330,7 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS toolCalls: [], error: 'An unexpected error occurred while processing the response.', } - await finalizeStream( - syntheticResult, - publisher, - runId, - abortController.signal.aborted, - requestId - ) + await finalizeStream(syntheticResult, publisher, runId, outcome, requestId) } finally { collector.endSpan( requestSpan, @@ -390,6 +396,26 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS }) }, cancel() { + // The browser's SSE reader closed. Flip `clientDisconnected` so + // in-flight `publisher.publish` calls silently no-op (prevents + // enqueueing on a closed controller). + // + // Intentionally does NOT fire the AbortController here. The + // abort controller is reserved for actual "abort this request" + // semantics (driven by `abortActiveStream()` on an explicit Stop + // or the Redis-marker poller for cross-node Stops). 
Firing it + // on browser disconnect means a successful stream that loses + // its reader at the last moment would get retroactively + // classified as aborted — which skips persisting the assistant + // message (see trace 707f2614 where the whole response + // disappeared after completion). + // + // Trade-off: on a true tab close, the orchestrator keeps reading + // events from Go until Go's stream ends, with `publish` no-op'ing + // each one. That's wasted LLM work but it's safe — the message + // gets persisted and the next chat reload shows it. An + // explicit Stop short-circuits this path cleanly via the + // /chat/abort handler, which DOES fire the AbortController. publisher.markDisconnected() }, }) diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index b32ff46be44..c42b0c705db 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -41,6 +41,53 @@ function isGenAIMessageCaptureEnabled(): boolean { return raw === 'true' || raw === '1' || raw === 'yes' } +/** + * Returns true if `err` is a user-initiated / upstream cancellation + * rather than a genuine failure. We check every flavor that the + * JS/Node runtime surfaces when an `AbortSignal` fires: + * + * - `DOMException` with `name === 'AbortError'` (browser + Node 18+ fetch) + * - plain `Error` with `name === 'AbortError'` (older polyfills) + * - Node's undici-shaped `code === 'ABORT_ERR'` + * - Bare `'AbortError'` strings rethrown as errors + * + * Callers use this to suppress `SpanStatusCode.ERROR` on cancel paths — + * dashboards should not light up red every time a user hits Stop. + * Matches the Go-side treatment of `context.Canceled` / + * `context.DeadlineExceeded` in `internal/core/errors.go:RecordError` + * and `internal/storage/postgres/tracing.go:dbSpan.End`. 
+ */ +function isCancellationError(err: unknown): boolean { + if (err == null) return false + if (typeof err === 'object') { + const e = err as { name?: unknown; code?: unknown; message?: unknown } + if (e.name === 'AbortError') return true + if (e.code === 'ABORT_ERR') return true + // Some wrappers stringify into the message but lose the name. + if (typeof e.message === 'string' && /aborted|AbortError/i.test(e.message)) { + return true + } + } + return false +} + +/** + * Apply terminal status to `span` based on whether the thrown `error` + * is a real failure or a cancellation. Always records the exception + * event for forensics; only sets `codes.ERROR` for real failures. + * Centralized so every span wrapper has identical classification. + */ +function markSpanForError(span: Span, error: unknown): void { + const asError = error instanceof Error ? error : new Error(String(error)) + span.recordException(asError) + if (!isCancellationError(error)) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }) + } +} + /** * Canonical OTel GenAI message shape used for both input and output * attributes. Kept minimal — only the three part types we actually @@ -205,11 +252,7 @@ export async function withIncomingGoSpan( span.setStatus({ code: SpanStatusCode.OK }) return result } catch (error) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - span.recordException(error instanceof Error ? error : new Error(String(error))) + markSpanForError(span, error) throw error } finally { span.end() @@ -253,11 +296,7 @@ export async function withCopilotSpan( span.setStatus({ code: SpanStatusCode.OK }) return result } catch (error) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - span.recordException(error instanceof Error ? 
error : new Error(String(error))) + markSpanForError(span, error) throw error } finally { span.end() @@ -308,11 +347,7 @@ export async function withCopilotToolSpan( span.setStatus({ code: SpanStatusCode.OK }) return result } catch (error) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - span.recordException(error instanceof Error ? error : new Error(String(error))) + markSpanForError(span, error) throw error } finally { span.end() @@ -522,17 +557,25 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { const finish: CopilotOtelRoot['finish'] = (outcome, error) => { if (finished) return finished = true - span.setAttribute( - TraceAttr.CopilotRequestOutcome, - outcome ?? RequestTraceV1Outcome.success - ) + const resolvedOutcome = outcome ?? RequestTraceV1Outcome.success + span.setAttribute(TraceAttr.CopilotRequestOutcome, resolvedOutcome) if (error) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - span.recordException(error instanceof Error ? error : new Error(String(error))) - } else if (outcome === RequestTraceV1Outcome.success) { + // `markSpanForError` records the exception event but only sets + // `codes.ERROR` for real failures — a cancellation-shaped error + // here stays `unset` (or `OK` if we resolve it below) so the + // trace doesn't look red when the user intentionally stopped. + markSpanForError(span, error) + if (isCancellationError(error)) { + span.setStatus({ code: SpanStatusCode.OK }) + } + } else if ( + resolvedOutcome === RequestTraceV1Outcome.success || + resolvedOutcome === RequestTraceV1Outcome.cancelled + ) { + // Explicitly mark cancelled outcomes as OK so dashboards keying + // off span status don't treat "user hit Stop" as a failure — the + // rich detail lives on `copilot.request.cancel_reason` and the + // `request.cancelled` event. 
span.setStatus({ code: SpanStatusCode.OK }) } span.end() @@ -612,20 +655,21 @@ export async function withCopilotOtelContext( ? span : trace.wrapSpanContext(createFallbackSpanContext()) const otelContext = trace.setSpan(parentContext, carrierSpan) - let sawError = false + let terminalStatusSet = false try { - return await context.with(otelContext, () => fn(otelContext)) + const result = await context.with(otelContext, () => fn(otelContext)) + span.setStatus({ code: SpanStatusCode.OK }) + terminalStatusSet = true + return result } catch (error) { - sawError = true - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - span.recordException(error instanceof Error ? error : new Error(String(error))) + markSpanForError(span, error) + terminalStatusSet = true throw error } finally { - if (!sawError) { + if (!terminalStatusSet) { + // Extremely defensive: should be unreachable, but avoids leaking + // an unset span status if some future refactor breaks both arms. span.setStatus({ code: SpanStatusCode.OK }) } span.end() diff --git a/apps/sim/lib/copilot/request/session/abort.ts b/apps/sim/lib/copilot/request/session/abort.ts index b5f8ec6b04b..2bbc975a010 100644 --- a/apps/sim/lib/copilot/request/session/abort.ts +++ b/apps/sim/lib/copilot/request/session/abort.ts @@ -257,9 +257,9 @@ export const AbortReason = { /** Same-process stop: browser→Sim→abortActiveStream. */ UserStop: 'user_stop:abortActiveStream', /** - * Cross-process stop: the SIM node that held the SSE didn't receive - * the Stop HTTP call, but it polled the Redis abort marker that the - * node that DID receive it wrote, and aborts on the poll. + * Cross-process stop: the Sim node that holds the SSE didn't + * receive the Stop HTTP call, but it polled the Redis abort marker + * that the node that DID receive it wrote, and aborts on the poll. 
*/ RedisPoller: 'redis_abort_marker:poller', /** Internal timeout on the outbound explicit-abort fetch to Go. */ diff --git a/apps/sim/lib/copilot/request/session/explicit-abort.ts b/apps/sim/lib/copilot/request/session/explicit-abort.ts index 223fff9266d..b27d6ab6504 100644 --- a/apps/sim/lib/copilot/request/session/explicit-abort.ts +++ b/apps/sim/lib/copilot/request/session/explicit-abort.ts @@ -49,7 +49,7 @@ export async function requestExplicitStreamAbort(params: { spanName: 'sim → go /api/streams/explicit-abort', operation: 'explicit_abort', attributes: { - 'copilot.stream.id': streamId, + [TraceAttr.StreamId]: streamId, ...(chatId ? { [TraceAttr.ChatId]: chatId } : {}), }, }) diff --git a/apps/sim/lib/copilot/request/types.ts b/apps/sim/lib/copilot/request/types.ts index 0cf48491929..fd296cd52ca 100644 --- a/apps/sim/lib/copilot/request/types.ts +++ b/apps/sim/lib/copilot/request/types.ts @@ -140,6 +140,19 @@ export interface OrchestratorOptions { export interface OrchestratorResult { success: boolean + /** + * True iff the non-success outcome was a user-initiated cancel + * (abort signal fired or client disconnected). Lets callers treat + * cancels differently from actual errors — notably, `buildOnComplete` + * must NOT finalize the chat row on cancel, because the browser's + * `/api/copilot/chat/stop` POST owns writing the partial assistant + * content and clearing `conversationId` in one UPDATE. Finalizing + * here would race and clear `conversationId` first, making the stop + * UPDATE match zero rows and the partial content vanish on refetch. + * + * Always false when `success=true`. 
+ */ + cancelled?: boolean content: string contentBlocks: ContentBlock[] toolCalls: ToolCallSummary[] From 0bb67c91d7e657d27794dc5b3d714c05ddd8b7c5 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 18 Apr 2026 16:34:53 -0700 Subject: [PATCH 09/10] Otel v3 --- apps/sim/app/api/billing/update-cost/route.ts | 2 +- .../api/copilot/api-keys/validate/route.ts | 7 +- apps/sim/app/api/copilot/chat/abort/route.ts | 2 +- apps/sim/app/api/copilot/chat/stop/route.ts | 2 +- apps/sim/app/api/copilot/chat/stream/route.ts | 6 +- apps/sim/app/api/copilot/confirm/route.ts | 2 +- .../message-actions/message-actions.tsx | 43 +- .../[workspaceId]/home/hooks/use-chat.ts | 6 +- apps/sim/lib/copilot/chat/post.ts | 52 +- apps/sim/lib/copilot/chat/terminal-state.ts | 7 +- .../generated/mothership-stream-v1-schema.ts | 3006 +++++----- .../copilot/generated/mothership-stream-v1.ts | 746 +-- .../lib/copilot/generated/request-trace-v1.ts | 143 +- .../lib/copilot/generated/tool-catalog-v1.ts | 3660 +++++++++--- .../lib/copilot/generated/tool-schemas-v1.ts | 4972 ++++++++--------- .../generated/trace-attribute-values-v1.ts | 507 +- .../copilot/generated/trace-attributes-v1.ts | 1912 +++---- .../lib/copilot/generated/trace-events-v1.ts | 56 +- .../lib/copilot/generated/trace-spans-v1.ts | 272 +- apps/sim/lib/copilot/request/go/fetch.ts | 2 +- .../lib/copilot/request/lifecycle/headless.ts | 6 + apps/sim/lib/copilot/request/lifecycle/run.ts | 8 +- .../lib/copilot/request/lifecycle/start.ts | 20 +- apps/sim/lib/copilot/request/otel.ts | 70 +- apps/sim/lib/copilot/request/session/abort.ts | 11 +- apps/sim/lib/copilot/request/session/index.ts | 2 +- .../lib/copilot/request/session/recovery.ts | 2 +- apps/sim/lib/copilot/request/subagent.ts | 4 +- apps/sim/lib/copilot/request/tools/files.ts | 2 +- apps/sim/lib/copilot/request/tools/tables.ts | 3 +- apps/sim/lib/copilot/request/trace.ts | 8 + .../tools/client/run-tool-execution.ts | 2 +- apps/sim/lib/copilot/vfs/file-reader.ts | 7 +- 33 files 
changed, 8643 insertions(+), 6907 deletions(-) diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 222c9ed5375..9269324bd57 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -4,6 +4,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { recordUsage } from '@/lib/billing/core/usage-log' import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing' +import { BillingRouteOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { checkInternalApiKey } from '@/lib/copilot/request/http' @@ -11,7 +12,6 @@ import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { isBillingEnabled } from '@/lib/core/config/feature-flags' import { type AtomicClaimResult, billingIdempotency } from '@/lib/core/idempotency/service' import { generateRequestId } from '@/lib/core/utils/request' -import { BillingRouteOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('BillingUpdateCostAPI') diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index 85aad942c4b..f52e2f34ee4 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -5,11 +5,11 @@ import { eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor' +import { CopilotValidateOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' 
import { checkInternalApiKey } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' -import { CopilotValidateOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotApiKeysValidate') @@ -33,7 +33,10 @@ export async function POST(req: NextRequest) { try { const auth = checkInternalApiKey(req) if (!auth.success) { - span.setAttribute(TraceAttr.CopilotValidateOutcome, CopilotValidateOutcome.InternalAuthFailed) + span.setAttribute( + TraceAttr.CopilotValidateOutcome, + CopilotValidateOutcome.InternalAuthFailed + ) span.setAttribute(TraceAttr.HttpStatusCode, 401) return new NextResponse(null, { status: 401 }) } diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts b/apps/sim/app/api/copilot/chat/abort/route.ts index aa41c5bc940..b3bd74befd5 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { NextResponse } from 'next/server' import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { CopilotAbortOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { fetchGo } from '@/lib/copilot/request/go/fetch' @@ -9,7 +10,6 @@ import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/htt import { withCopilotSpan, withIncomingGoSpan } from '@/lib/copilot/request/otel' import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/request/session' import { env } from '@/lib/core/config/env' -import { CopilotAbortOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotChatAbortAPI') const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000 diff --git 
a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index 569b9487bb2..ea8a9216f03 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -6,12 +6,12 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message' +import { CopilotStopOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' import { taskPubSub } from '@/lib/copilot/tasks' import { generateId } from '@/lib/core/utils/uuid' -import { CopilotStopOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotChatStopAPI') diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index bfeb2a5c3d9..df7a0c86dcc 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -6,11 +6,11 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' -import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { CopilotResumeOutcome, CopilotTransport, } from '@/lib/copilot/generated/trace-attribute-values-v1' +import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' import { getCopilotTracer } from '@/lib/copilot/request/otel' @@ -144,9 +144,7 @@ export async function GET(request: NextRequest) { 'copilot.resume.request', { attributes: { - 
[TraceAttr.CopilotTransport]: batchMode - ? CopilotTransport.Batch - : CopilotTransport.Stream, + [TraceAttr.CopilotTransport]: batchMode ? CopilotTransport.Batch : CopilotTransport.Stream, [TraceAttr.StreamId]: streamId, [TraceAttr.UserId]: authenticatedUserId, [TraceAttr.CopilotResumeAfterCursor]: afterCursor || '0', diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index b646ea7cc3b..7693496e1ef 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -13,6 +13,7 @@ import { getRunSegment, upsertAsyncToolCall, } from '@/lib/copilot/async-runs/repository' +import { CopilotConfirmOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm' @@ -25,7 +26,6 @@ import { createUnauthorizedResponse, } from '@/lib/copilot/request/http' import { withIncomingGoSpan } from '@/lib/copilot/request/otel' -import { CopilotConfirmOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotConfirmAPI') diff --git a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx index 322b0e98bf9..84f1ce4cada 100644 --- a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx +++ b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx @@ -138,15 +138,16 @@ export function MessageActions({ content, chatId, userQuery, requestId }: Messag } }, []) - // Render the action row whenever there's ANYTHING to act on. 
For a - // normal assistant turn that's `content`; for an aborted / error / - // content-less turn it's the `requestId` alone — users still need to - // be able to grab the trace ID for bug reports in those cases. const hasContent = Boolean(content) - if (!hasContent && !requestId) return null - const canSubmitFeedback = Boolean(chatId && userQuery) + // Render the action row whenever there's something the user can + // actually act on: copy the message, or open the feedback modal + // (thumbs up / down). Request ID alone is not a reason to render the + // row anymore — it's only exposed from inside the thumbs-down modal, + // which requires both chatId and userQuery. + if (!hasContent && !canSubmitFeedback) return null + return ( <>
@@ -197,27 +198,15 @@ export function MessageActions({ content, chatId, userQuery, requestId }: Messag )} - {requestId && ( - - - - - - {copiedRequestId ? 'Copied request ID' : 'Copy request ID'} - - - )} + {/* + Intentionally NO root-row "Copy request ID" button here — it + rendered as an ambiguous standalone Copy icon next to the + message Copy icon, which was confusing (two indistinguishable + copy buttons side by side). The request ID only needs to be + grabbable from the thumbs-down feedback modal below, which is + the surface we actually want people to use when reporting a + bad response. + */}
diff --git a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts index 713632bf8a0..645bf447c13 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts +++ b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts @@ -1833,7 +1833,7 @@ export function useChat( try { const pendingLines: string[] = [] - readLoop: while (true) { + while (true) { if (pendingLines.length === 0) { // Once the terminal `complete` event has been processed, // don't read another chunk — we've drained everything @@ -2955,9 +2955,7 @@ export function useChat( method: 'POST', headers: { 'Content-Type': 'application/json', - ...(streamTraceparentRef.current - ? { traceparent: streamTraceparentRef.current } - : {}), + ...(streamTraceparentRef.current ? { traceparent: streamTraceparentRef.current } : {}), }, body: JSON.stringify({ chatId, diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index d8437b5a7d1..505c6d1d9ba 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -21,11 +21,7 @@ import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' -import { - createBadRequestResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request/http' +import { createBadRequestResponse, createUnauthorizedResponse } from '@/lib/copilot/request/http' import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start' import { startCopilotOtelRoot, withCopilotSpan } from '@/lib/copilot/request/otel' import { @@ -609,17 +605,27 @@ async function resolveBranch(params: { } export async function handleUnifiedChatPost(req: NextRequest) { - const tracker = createRequestTracker(false) 
let actualChatId: string | undefined let userMessageId = '' let chatStreamLockAcquired = false - // Started once we know the streamId (= userMessageId). Every subsequent - // span (persistUserMessage, createRunSegment, the whole SSE stream, etc.) - // nests under this root via AsyncLocalStorage / explicit propagation, - // and the stream's terminal code path calls finish() when the request - // actually ends. Errors thrown from the handler before the stream - // starts are finished here in the catch below. + // Started once we've parsed the body (need userMessageId to stamp as + // streamId). Every subsequent span (persistUserMessage, + // createRunSegment, the whole SSE stream, etc.) nests under this + // root via AsyncLocalStorage / explicit propagation, and the stream's + // terminal code path calls finish() when the request actually ends. + // Errors thrown from the handler before the stream starts are + // finished here in the catch below. let otelRoot: ReturnType | undefined + // `requestId` is the canonical logical ID for this HTTP request — + // same value that flows into `request.id`/`sim.request_id` span + // attributes, the persisted `msg.requestId`, and eventually the + // Grafana trace-ID search box. Derived from otelRoot.requestId (= the + // OTel trace ID of the root span) as soon as that's created. Stays + // empty only in the narrow window before otelRoot is set — errors in + // that window can't be correlated to any trace anyway, and their log + // line carries the error message + stack which is the actually + // useful info. 
+ let requestId = '' const executionId = crypto.randomUUID() const runId = crypto.randomUUID() @@ -635,7 +641,11 @@ export async function handleUnifiedChatPost(req: NextRequest) { userMessageId = body.userMessageId || crypto.randomUUID() otelRoot = startCopilotOtelRoot({ - requestId: tracker.requestId, + // No explicit requestId — startCopilotOtelRoot derives it from + // the span's OTel trace ID so `msg.requestId` on the UI side + // ends up being the same value Grafana uses. See the scope + // doc-comment and the call site for why this is the desired + // direction of the unification. streamId: userMessageId, executionId, runId, @@ -646,6 +656,12 @@ export async function handleUnifiedChatPost(req: NextRequest) { // by setInputMessages above. userMessagePreview: body.message, }) + // Promote the OTel-derived ID to the handler-level `requestId` so + // every downstream consumer (logs, orchestrator, onComplete, + // onError, persisted assistant message) uses the same value. + if (otelRoot.requestId) { + requestId = otelRoot.requestId + } // Emit `gen_ai.input.messages` on the root agent span for OTel // GenAI spec compliance (Honeycomb's Gen AI view keys off this). // Gated on OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT @@ -800,7 +816,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { message: body.message, workspaceId, chatId: actualChatId, - requestId: tracker.requestId, + requestId, }), otelRoot!.context ) @@ -908,7 +924,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { message: body.message, titleModel: branch.titleModel, ...(branch.titleProvider ? 
{ titleProvider: branch.titleProvider } : {}), - requestId: tracker.requestId, + requestId, workspaceId, otelRoot: otelRoot!, orchestrateOptions: { @@ -925,7 +941,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { onComplete: buildOnComplete({ chatId: actualChatId, userMessageId, - requestId: tracker.requestId, + requestId, workspaceId, notifyWorkspaceStatus: branch.notifyWorkspaceStatus, otelRoot, @@ -933,7 +949,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { onError: buildOnError({ chatId: actualChatId, userMessageId, - requestId: tracker.requestId, + requestId, workspaceId, notifyWorkspaceStatus: branch.notifyWorkspaceStatus, }), @@ -970,7 +986,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { ) } - logger.error(`[${tracker.requestId}] Error handling unified chat request`, { + logger.error(`[${requestId}] Error handling unified chat request`, { error: error instanceof Error ? error.message : 'Unknown error', stack: error instanceof Error ? 
error.stack : undefined, }) diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index e620dc27957..f408ab80feb 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -2,10 +2,10 @@ import { db } from '@sim/db' import { copilotChats } from '@sim/db/schema' import { and, eq, sql } from 'drizzle-orm' import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message' +import { CopilotChatFinalizeOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' -import { CopilotChatFinalizeOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' interface FinalizeAssistantTurnParams { chatId: string @@ -66,7 +66,10 @@ export async function finalizeAssistantTurn({ messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`, }) .where(updateWhere) - span.setAttribute(TraceAttr.ChatFinalizeOutcome, CopilotChatFinalizeOutcome.AppendedAssistant) + span.setAttribute( + TraceAttr.ChatFinalizeOutcome, + CopilotChatFinalizeOutcome.AppendedAssistant + ) return } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts index b702b8de3db..1c670b37b54 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1-schema.ts @@ -5,1849 +5,1421 @@ export type JsonSchema = unknown export const MOTHERSHIP_STREAM_V1_SCHEMA: JsonSchema = { - "$defs": { - "MothershipStreamV1AdditionalPropertiesMap": { - "additionalProperties": true, - "type": "object" - }, - "MothershipStreamV1AsyncToolRecordStatus": { - "enum": [ - "pending", - "running", - "completed", - "failed", - "cancelled", - 
"delivered" - ], - "type": "string" - }, - "MothershipStreamV1CheckpointPauseEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CheckpointPausePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + $defs: { + MothershipStreamV1AdditionalPropertiesMap: { + additionalProperties: true, + type: 'object', + }, + MothershipStreamV1AsyncToolRecordStatus: { + enum: ['pending', 'running', 'completed', 'failed', 'cancelled', 'delivered'], + type: 'string', + }, + MothershipStreamV1CheckpointPauseEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CheckpointPausePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CheckpointPauseFrame": { - "additionalProperties": false, - "properties": { - "parentToolCallId": { - "type": "string" - }, - "parentToolName": { - "type": "string" - }, - "pendingToolIds": { - "items": { - "type": "string" + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CheckpointPauseFrame: { + additionalProperties: false, + properties: { + parentToolCallId: { + type: 'string', + }, + parentToolName: { + type: 
'string', + }, + pendingToolIds: { + items: { + type: 'string', }, - "type": "array" - } + type: 'array', + }, }, - "required": [ - "parentToolCallId", - "parentToolName", - "pendingToolIds" - ], - "type": "object" - }, - "MothershipStreamV1CheckpointPausePayload": { - "additionalProperties": false, - "properties": { - "checkpointId": { - "type": "string" - }, - "executionId": { - "type": "string" - }, - "frames": { - "items": { - "$ref": "#/$defs/MothershipStreamV1CheckpointPauseFrame" + required: ['parentToolCallId', 'parentToolName', 'pendingToolIds'], + type: 'object', + }, + MothershipStreamV1CheckpointPausePayload: { + additionalProperties: false, + properties: { + checkpointId: { + type: 'string', + }, + executionId: { + type: 'string', + }, + frames: { + items: { + $ref: '#/$defs/MothershipStreamV1CheckpointPauseFrame', }, - "type": "array" + type: 'array', }, - "kind": { - "enum": [ - "checkpoint_pause" - ], - "type": "string" + kind: { + enum: ['checkpoint_pause'], + type: 'string', }, - "pendingToolCallIds": { - "items": { - "type": "string" + pendingToolCallIds: { + items: { + type: 'string', }, - "type": "array" + type: 'array', + }, + runId: { + type: 'string', }, - "runId": { - "type": "string" - } }, - "required": [ - "kind", - "checkpointId", - "runId", - "executionId", - "pendingToolCallIds" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDoneData": { - "additionalProperties": false, - "properties": { - "summary_chars": { - "type": "integer" - } + required: ['kind', 'checkpointId', 'runId', 'executionId', 'pendingToolCallIds'], + type: 'object', + }, + MothershipStreamV1CompactionDoneData: { + additionalProperties: false, + properties: { + summary_chars: { + type: 'integer', + }, }, - "required": [ - "summary_chars" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDoneEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompactionDonePayload" - }, - "scope": 
{ - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['summary_chars'], + type: 'object', + }, + MothershipStreamV1CompactionDoneEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompactionDonePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompactionDonePayload": { - "additionalProperties": false, - "properties": { - "data": { - "$ref": "#/$defs/MothershipStreamV1CompactionDoneData" - }, - "kind": { - "enum": [ - "compaction_done" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CompactionDonePayload: { + additionalProperties: false, + properties: { + data: { + $ref: '#/$defs/MothershipStreamV1CompactionDoneData', + }, + kind: { + enum: ['compaction_done'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1CompactionStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompactionStartPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": 
"#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1CompactionStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompactionStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompactionStartPayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "compaction_start" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CompactionStartPayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['compaction_start'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1CompleteEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1CompletePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "complete" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + 
}, + MothershipStreamV1CompleteEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1CompletePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['complete'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1CompletePayload": { - "additionalProperties": false, - "properties": { - "cost": { - "$ref": "#/$defs/MothershipStreamV1CostData" - }, - "reason": { - "type": "string" - }, - "response": true, - "status": { - "$ref": "#/$defs/MothershipStreamV1CompletionStatus" - }, - "usage": { - "$ref": "#/$defs/MothershipStreamV1UsageData" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1CompletePayload: { + additionalProperties: false, + properties: { + cost: { + $ref: '#/$defs/MothershipStreamV1CostData', + }, + reason: { + type: 'string', + }, + response: true, + status: { + $ref: '#/$defs/MothershipStreamV1CompletionStatus', + }, + usage: { + $ref: '#/$defs/MothershipStreamV1UsageData', + }, }, - "required": [ - "status" - ], - "type": "object" - }, - "MothershipStreamV1CompletionStatus": { - "enum": [ - "complete", - "error", - "cancelled" - ], - "type": "string" - }, - "MothershipStreamV1CostData": { - "additionalProperties": false, - "properties": { - "input": { - "type": "number" - }, - "output": { - "type": "number" - }, - "total": { - "type": "number" - } + required: ['status'], + type: 'object', + }, + MothershipStreamV1CompletionStatus: { + enum: ['complete', 'error', 'cancelled'], + type: 'string', + }, + MothershipStreamV1CostData: { + additionalProperties: false, + 
properties: { + input: { + type: 'number', + }, + output: { + type: 'number', + }, + total: { + type: 'number', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1ErrorEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ErrorPayload" + MothershipStreamV1ErrorEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ErrorPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" + seq: { + type: 'integer', }, - "seq": { - "type": "integer" + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" + ts: { + type: 'string', }, - "ts": { - "type": "string" + type: { + enum: ['error'], + type: 'string', }, - "type": { - "enum": [ - "error" - ], - "type": "string" + v: { + enum: [1], + type: 'integer', }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ErrorPayload": { - "additionalProperties": false, - "properties": { - "code": { - "type": "string" - }, - "data": true, - "displayMessage": { - "type": "string" - }, - "error": { - "type": "string" - }, - "message": { - "type": "string" - }, - "provider": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ErrorPayload: { + additionalProperties: false, + properties: { + code: { + type: 'string', + }, + data: true, + displayMessage: { + type: 'string', + }, + error: { + type: 'string', + }, + message: { + type: 'string', + }, + provider: { + type: 'string', + }, }, - "required": [ - "message" - ], - "type": "object" - }, - 
"MothershipStreamV1EventEnvelopeCommon": { - "additionalProperties": false, - "properties": { - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['message'], + type: 'object', + }, + MothershipStreamV1EventEnvelopeCommon: { + additionalProperties: false, + properties: { + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream" - ], - "type": "object" - }, - "MothershipStreamV1EventType": { - "enum": [ - "session", - "text", - "tool", - "span", - "resource", - "run", - "error", - "complete" - ], - "type": "string" - }, - "MothershipStreamV1ResourceDescriptor": { - "additionalProperties": false, - "properties": { - "id": { - "type": "string" - }, - "title": { - "type": "string" - }, - "type": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream'], + type: 'object', + }, + MothershipStreamV1EventType: { + enum: ['session', 'text', 'tool', 'span', 'resource', 'run', 'error', 'complete'], + type: 'string', + }, + MothershipStreamV1ResourceDescriptor: { + additionalProperties: false, + properties: { + id: { + type: 'string', + }, + title: { + type: 'string', + }, + type: { + type: 'string', + }, }, - "required": [ - "type", - "id" - ], - "type": "object" - }, - "MothershipStreamV1ResourceOp": { - "enum": [ - "upsert", - "remove" - ], - "type": "string" - }, - "MothershipStreamV1ResourceRemoveEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": 
"#/$defs/MothershipStreamV1ResourceRemovePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "resource" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['type', 'id'], + type: 'object', + }, + MothershipStreamV1ResourceOp: { + enum: ['upsert', 'remove'], + type: 'string', + }, + MothershipStreamV1ResourceRemoveEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ResourceRemovePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['resource'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ResourceRemovePayload": { - "additionalProperties": false, - "properties": { - "op": { - "enum": [ - "remove" - ], - "type": "string" - }, - "resource": { - "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ResourceRemovePayload: { + additionalProperties: false, + properties: { + op: { + enum: ['remove'], + type: 'string', + }, + resource: { + $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', + }, }, - "required": [ - "op", - "resource" - ], - "type": "object" - }, - "MothershipStreamV1ResourceUpsertEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ResourceUpsertPayload" 
- }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "resource" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['op', 'resource'], + type: 'object', + }, + MothershipStreamV1ResourceUpsertEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ResourceUpsertPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['resource'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ResourceUpsertPayload": { - "additionalProperties": false, - "properties": { - "op": { - "enum": [ - "upsert" - ], - "type": "string" - }, - "resource": { - "$ref": "#/$defs/MothershipStreamV1ResourceDescriptor" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ResourceUpsertPayload: { + additionalProperties: false, + properties: { + op: { + enum: ['upsert'], + type: 'string', + }, + resource: { + $ref: '#/$defs/MothershipStreamV1ResourceDescriptor', + }, }, - "required": [ - "op", - "resource" - ], - "type": "object" - }, - "MothershipStreamV1ResumeRequest": { - "additionalProperties": false, - "properties": { - "checkpointId": { - "type": "string" - }, - "results": { - "items": { - "$ref": "#/$defs/MothershipStreamV1ResumeToolResult" + required: ['op', 'resource'], + type: 'object', + }, + MothershipStreamV1ResumeRequest: { + 
additionalProperties: false, + properties: { + checkpointId: { + type: 'string', + }, + results: { + items: { + $ref: '#/$defs/MothershipStreamV1ResumeToolResult', }, - "type": "array" + type: 'array', + }, + streamId: { + type: 'string', }, - "streamId": { - "type": "string" - } }, - "required": [ - "streamId", - "checkpointId", - "results" - ], - "type": "object" - }, - "MothershipStreamV1ResumeToolResult": { - "additionalProperties": false, - "properties": { - "error": { - "type": "string" - }, - "output": true, - "success": { - "type": "boolean" - }, - "toolCallId": { - "type": "string" - } + required: ['streamId', 'checkpointId', 'results'], + type: 'object', + }, + MothershipStreamV1ResumeToolResult: { + additionalProperties: false, + properties: { + error: { + type: 'string', + }, + output: true, + success: { + type: 'boolean', + }, + toolCallId: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "success" - ], - "type": "object" - }, - "MothershipStreamV1RunKind": { - "enum": [ - "checkpoint_pause", - "resumed", - "compaction_start", - "compaction_done" - ], - "type": "string" - }, - "MothershipStreamV1RunResumedEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1RunResumedPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "run" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['toolCallId', 'success'], + type: 'object', + }, + MothershipStreamV1RunKind: { + enum: ['checkpoint_pause', 'resumed', 'compaction_start', 'compaction_done'], + type: 'string', + }, + MothershipStreamV1RunResumedEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: 
'#/$defs/MothershipStreamV1RunResumedPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['run'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1RunResumedPayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "resumed" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1RunResumedPayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['resumed'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SessionChatEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionChatPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SessionChatEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionChatPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: 
['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionChatPayload": { - "additionalProperties": false, - "properties": { - "chatId": { - "type": "string" - }, - "kind": { - "enum": [ - "chat" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionChatPayload: { + additionalProperties: false, + properties: { + chatId: { + type: 'string', + }, + kind: { + enum: ['chat'], + type: 'string', + }, }, - "required": [ - "kind", - "chatId" - ], - "type": "object" - }, - "MothershipStreamV1SessionKind": { - "enum": [ - "trace", - "chat", - "title", - "start" - ], - "type": "string" - }, - "MothershipStreamV1SessionStartData": { - "additionalProperties": false, - "properties": { - "responseId": { - "type": "string" - } + required: ['kind', 'chatId'], + type: 'object', + }, + MothershipStreamV1SessionKind: { + enum: ['trace', 'chat', 'title', 'start'], + type: 'string', + }, + MothershipStreamV1SessionStartData: { + additionalProperties: false, + properties: { + responseId: { + type: 'string', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1SessionStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionStartPayload" + MothershipStreamV1SessionStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" + seq: { + type: 'integer', }, - "seq": { - "type": "integer" + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" + trace: { + $ref: 
'#/$defs/MothershipStreamV1Trace', }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" + ts: { + type: 'string', }, - "ts": { - "type": "string" + type: { + enum: ['session'], + type: 'string', }, - "type": { - "enum": [ - "session" - ], - "type": "string" + v: { + enum: [1], + type: 'integer', }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionStartPayload": { - "additionalProperties": false, - "properties": { - "data": { - "$ref": "#/$defs/MothershipStreamV1SessionStartData" - }, - "kind": { - "enum": [ - "start" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionStartPayload: { + additionalProperties: false, + properties: { + data: { + $ref: '#/$defs/MothershipStreamV1SessionStartData', + }, + kind: { + enum: ['start'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SessionTitleEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionTitlePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SessionTitleEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionTitlePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + 
trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionTitlePayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "title" - ], - "type": "string" - }, - "title": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionTitlePayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['title'], + type: 'string', + }, + title: { + type: 'string', + }, }, - "required": [ - "kind", - "title" - ], - "type": "object" - }, - "MothershipStreamV1SessionTraceEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SessionTracePayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "session" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind', 'title'], + type: 'object', + }, + MothershipStreamV1SessionTraceEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SessionTracePayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['session'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", 
- "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SessionTracePayload": { - "additionalProperties": false, - "properties": { - "kind": { - "enum": [ - "trace" - ], - "type": "string" - }, - "requestId": { - "type": "string" - }, - "spanId": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SessionTracePayload: { + additionalProperties: false, + properties: { + kind: { + enum: ['trace'], + type: 'string', + }, + requestId: { + type: 'string', + }, + spanId: { + type: 'string', + }, }, - "required": [ - "kind", - "requestId" - ], - "type": "object" - }, - "MothershipStreamV1SpanKind": { - "enum": [ - "subagent" - ], - "type": "string" - }, - "MothershipStreamV1SpanLifecycleEvent": { - "enum": [ - "start", - "end" - ], - "type": "string" - }, - "MothershipStreamV1SpanPayloadKind": { - "enum": [ - "subagent", - "structured_result", - "subagent_result" - ], - "type": "string" - }, - "MothershipStreamV1StreamCursor": { - "additionalProperties": false, - "properties": { - "cursor": { - "type": "string" - }, - "seq": { - "type": "integer" - }, - "streamId": { - "type": "string" - } + required: ['kind', 'requestId'], + type: 'object', + }, + MothershipStreamV1SpanKind: { + enum: ['subagent'], + type: 'string', + }, + MothershipStreamV1SpanLifecycleEvent: { + enum: ['start', 'end'], + type: 'string', + }, + MothershipStreamV1SpanPayloadKind: { + enum: ['subagent', 'structured_result', 'subagent_result'], + type: 'string', + }, + MothershipStreamV1StreamCursor: { + additionalProperties: false, + properties: { + cursor: { + type: 'string', + }, + seq: { + type: 'integer', + }, + streamId: { + type: 'string', + }, }, - "required": [ - "streamId", - "cursor", - "seq" - ], - "type": "object" - }, - "MothershipStreamV1StreamRef": { - "additionalProperties": false, - "properties": { - "chatId": { - "type": "string" - }, - "cursor": { - "type": "string" - }, - "streamId": 
{ - "type": "string" - } + required: ['streamId', 'cursor', 'seq'], + type: 'object', + }, + MothershipStreamV1StreamRef: { + additionalProperties: false, + properties: { + chatId: { + type: 'string', + }, + cursor: { + type: 'string', + }, + streamId: { + type: 'string', + }, }, - "required": [ - "streamId" - ], - "type": "object" - }, - "MothershipStreamV1StreamScope": { - "additionalProperties": false, - "properties": { - "agentId": { - "type": "string" - }, - "lane": { - "enum": [ - "subagent" - ], - "type": "string" - }, - "parentToolCallId": { - "type": "string" - } + required: ['streamId'], + type: 'object', + }, + MothershipStreamV1StreamScope: { + additionalProperties: false, + properties: { + agentId: { + type: 'string', + }, + lane: { + enum: ['subagent'], + type: 'string', + }, + parentToolCallId: { + type: 'string', + }, }, - "required": [ - "lane" - ], - "type": "object" - }, - "MothershipStreamV1StructuredResultSpanEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['lane'], + type: 'object', + }, + MothershipStreamV1StructuredResultSpanEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1StructuredResultSpanPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + 
type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1StructuredResultSpanPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "kind": { - "enum": [ - "structured_result" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1StructuredResultSpanPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + kind: { + enum: ['structured_result'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SubagentResultSpanEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SubagentResultSpanEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentResultSpanPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": 
"object" - }, - "MothershipStreamV1SubagentResultSpanPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "kind": { - "enum": [ - "subagent_result" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentResultSpanPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + kind: { + enum: ['subagent_result'], + type: 'string', + }, }, - "required": [ - "kind" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanEndEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind'], + type: 'object', + }, + MothershipStreamV1SubagentSpanEndEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentSpanEndPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanEndPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "event": { - 
"enum": [ - "end" - ], - "type": "string" - }, - "kind": { - "enum": [ - "subagent" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentSpanEndPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + event: { + enum: ['end'], + type: 'string', + }, + kind: { + enum: ['subagent'], + type: 'string', + }, }, - "required": [ - "kind", - "event" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanStartEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "span" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind', 'event'], + type: 'object', + }, + MothershipStreamV1SubagentSpanStartEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1SubagentSpanStartPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['span'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1SubagentSpanStartPayload": { - "additionalProperties": false, - "properties": { - "agent": { - "type": "string" - }, - "data": true, - "event": { - "enum": [ - "start" - ], - "type": "string" - }, - "kind": 
{ - "enum": [ - "subagent" - ], - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1SubagentSpanStartPayload: { + additionalProperties: false, + properties: { + agent: { + type: 'string', + }, + data: true, + event: { + enum: ['start'], + type: 'string', + }, + kind: { + enum: ['subagent'], + type: 'string', + }, }, - "required": [ - "kind", - "event" - ], - "type": "object" - }, - "MothershipStreamV1TextChannel": { - "enum": [ - "assistant", - "thinking" - ], - "type": "string" - }, - "MothershipStreamV1TextEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1TextPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "text" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['kind', 'event'], + type: 'object', + }, + MothershipStreamV1TextChannel: { + enum: ['assistant', 'thinking'], + type: 'string', + }, + MothershipStreamV1TextEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1TextPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['text'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1TextPayload": { - "additionalProperties": false, - "properties": { - "channel": { - "$ref": 
"#/$defs/MothershipStreamV1TextChannel" - }, - "text": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1TextPayload: { + additionalProperties: false, + properties: { + channel: { + $ref: '#/$defs/MothershipStreamV1TextChannel', + }, + text: { + type: 'string', + }, }, - "required": [ - "channel", - "text" - ], - "type": "object" - }, - "MothershipStreamV1ToolArgsDeltaEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['channel', 'text'], + type: 'object', + }, + MothershipStreamV1ToolArgsDeltaEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolArgsDeltaPayload": { - "additionalProperties": false, - "properties": { - "argumentsDelta": { - "type": "string" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "phase": { - "enum": [ - "args_delta" - ], - "type": 
"string" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolArgsDeltaPayload: { + additionalProperties: false, + properties: { + argumentsDelta: { + type: 'string', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + phase: { + enum: ['args_delta'], + type: 'string', + }, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "toolName", - "argumentsDelta", - "executor", - "mode", - "phase" - ], - "type": "object" - }, - "MothershipStreamV1ToolCallDescriptor": { - "additionalProperties": false, - "properties": { - "arguments": { - "$ref": "#/$defs/MothershipStreamV1AdditionalPropertiesMap" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "partial": { - "type": "boolean" - }, - "phase": { - "enum": [ - "call" - ], - "type": "string" - }, - "requiresConfirmation": { - "type": "boolean" - }, - "status": { - "$ref": "#/$defs/MothershipStreamV1ToolStatus" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - }, - "ui": { - "$ref": "#/$defs/MothershipStreamV1ToolUI" - } + required: ['toolCallId', 'toolName', 'argumentsDelta', 'executor', 'mode', 'phase'], + type: 'object', + }, + MothershipStreamV1ToolCallDescriptor: { + additionalProperties: false, + properties: { + arguments: { + $ref: '#/$defs/MothershipStreamV1AdditionalPropertiesMap', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + partial: { + type: 'boolean', + }, + phase: { + enum: ['call'], + type: 'string', + }, + requiresConfirmation: { + type: 'boolean', + }, + status: { + $ref: '#/$defs/MothershipStreamV1ToolStatus', + 
}, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, + ui: { + $ref: '#/$defs/MothershipStreamV1ToolUI', + }, }, - "required": [ - "toolCallId", - "toolName", - "executor", - "mode", - "phase" - ], - "type": "object" - }, - "MothershipStreamV1ToolCallEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolCallDescriptor" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase'], + type: 'object', + }, + MothershipStreamV1ToolCallEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ToolCallDescriptor', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolExecutor": { - "enum": [ - "go", - "sim", - "client" - ], - "type": "string" - }, - "MothershipStreamV1ToolMode": { - "enum": [ - "sync", - "async" - ], - "type": "string" - }, - "MothershipStreamV1ToolOutcome": { - "enum": [ - "success", - "error", - "cancelled", - "skipped", - "rejected" - ], - "type": "string" - }, - "MothershipStreamV1ToolPhase": { - "enum": [ - "call", - "args_delta", - "result" - ], - "type": "string" - }, - 
"MothershipStreamV1ToolResultEventEnvelope": { - "additionalProperties": false, - "properties": { - "payload": { - "$ref": "#/$defs/MothershipStreamV1ToolResultPayload" - }, - "scope": { - "$ref": "#/$defs/MothershipStreamV1StreamScope" - }, - "seq": { - "type": "integer" - }, - "stream": { - "$ref": "#/$defs/MothershipStreamV1StreamRef" - }, - "trace": { - "$ref": "#/$defs/MothershipStreamV1Trace" - }, - "ts": { - "type": "string" - }, - "type": { - "enum": [ - "tool" - ], - "type": "string" - }, - "v": { - "enum": [ - 1 - ], - "type": "integer" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolExecutor: { + enum: ['go', 'sim', 'client'], + type: 'string', + }, + MothershipStreamV1ToolMode: { + enum: ['sync', 'async'], + type: 'string', + }, + MothershipStreamV1ToolOutcome: { + enum: ['success', 'error', 'cancelled', 'skipped', 'rejected'], + type: 'string', + }, + MothershipStreamV1ToolPhase: { + enum: ['call', 'args_delta', 'result'], + type: 'string', + }, + MothershipStreamV1ToolResultEventEnvelope: { + additionalProperties: false, + properties: { + payload: { + $ref: '#/$defs/MothershipStreamV1ToolResultPayload', + }, + scope: { + $ref: '#/$defs/MothershipStreamV1StreamScope', + }, + seq: { + type: 'integer', + }, + stream: { + $ref: '#/$defs/MothershipStreamV1StreamRef', + }, + trace: { + $ref: '#/$defs/MothershipStreamV1Trace', + }, + ts: { + type: 'string', + }, + type: { + enum: ['tool'], + type: 'string', + }, + v: { + enum: [1], + type: 'integer', + }, }, - "required": [ - "v", - "seq", - "ts", - "stream", - "type", - "payload" - ], - "type": "object" - }, - "MothershipStreamV1ToolResultPayload": { - "additionalProperties": false, - "properties": { - "error": { - "type": "string" - }, - "executor": { - "$ref": "#/$defs/MothershipStreamV1ToolExecutor" - }, - "mode": { - "$ref": "#/$defs/MothershipStreamV1ToolMode" - }, - "output": true, - "phase": { - "enum": [ - "result" - ], - "type": 
"string" - }, - "status": { - "$ref": "#/$defs/MothershipStreamV1ToolStatus" - }, - "success": { - "type": "boolean" - }, - "toolCallId": { - "type": "string" - }, - "toolName": { - "type": "string" - } + required: ['v', 'seq', 'ts', 'stream', 'type', 'payload'], + type: 'object', + }, + MothershipStreamV1ToolResultPayload: { + additionalProperties: false, + properties: { + error: { + type: 'string', + }, + executor: { + $ref: '#/$defs/MothershipStreamV1ToolExecutor', + }, + mode: { + $ref: '#/$defs/MothershipStreamV1ToolMode', + }, + output: true, + phase: { + enum: ['result'], + type: 'string', + }, + status: { + $ref: '#/$defs/MothershipStreamV1ToolStatus', + }, + success: { + type: 'boolean', + }, + toolCallId: { + type: 'string', + }, + toolName: { + type: 'string', + }, }, - "required": [ - "toolCallId", - "toolName", - "executor", - "mode", - "phase", - "success" - ], - "type": "object" - }, - "MothershipStreamV1ToolStatus": { - "enum": [ - "generating", - "executing", - "success", - "error", - "cancelled", - "skipped", - "rejected" - ], - "type": "string" - }, - "MothershipStreamV1ToolUI": { - "additionalProperties": false, - "properties": { - "clientExecutable": { - "type": "boolean" - }, - "hidden": { - "type": "boolean" - }, - "icon": { - "type": "string" - }, - "internal": { - "type": "boolean" - }, - "phaseLabel": { - "type": "string" - }, - "requiresConfirmation": { - "type": "boolean" - }, - "title": { - "type": "string" - } + required: ['toolCallId', 'toolName', 'executor', 'mode', 'phase', 'success'], + type: 'object', + }, + MothershipStreamV1ToolStatus: { + enum: ['generating', 'executing', 'success', 'error', 'cancelled', 'skipped', 'rejected'], + type: 'string', + }, + MothershipStreamV1ToolUI: { + additionalProperties: false, + properties: { + clientExecutable: { + type: 'boolean', + }, + hidden: { + type: 'boolean', + }, + icon: { + type: 'string', + }, + internal: { + type: 'boolean', + }, + phaseLabel: { + type: 'string', + }, + 
requiresConfirmation: { + type: 'boolean', + }, + title: { + type: 'string', + }, }, - "type": "object" + type: 'object', }, - "MothershipStreamV1Trace": { - "additionalProperties": false, - "properties": { - "goTraceId": { - "description": "OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity.", - "type": "string" + MothershipStreamV1Trace: { + additionalProperties: false, + properties: { + goTraceId: { + description: + 'OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity.', + type: 'string', }, - "requestId": { - "type": "string" + requestId: { + type: 'string', + }, + spanId: { + type: 'string', }, - "spanId": { - "type": "string" - } }, - "required": [ - "requestId" - ], - "type": "object" + required: ['requestId'], + type: 'object', }, - "MothershipStreamV1UsageData": { - "additionalProperties": false, - "properties": { - "cache_creation_input_tokens": { - "type": "integer" + MothershipStreamV1UsageData: { + additionalProperties: false, + properties: { + cache_creation_input_tokens: { + type: 'integer', + }, + cache_read_input_tokens: { + type: 'integer', }, - "cache_read_input_tokens": { - "type": "integer" + input_tokens: { + type: 'integer', }, - "input_tokens": { - "type": "integer" + model: { + type: 'string', }, - "model": { - "type": "string" + output_tokens: { + type: 'integer', }, - "output_tokens": { - "type": "integer" + total_tokens: { + type: 'integer', }, - "total_tokens": { - "type": "integer" - } }, - "type": "object" - } + type: 'object', + }, }, - "$id": "mothership-stream-v1.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "Shared execution-oriented mothership stream contract from Go to Sim.", - "oneOf": [ + $id: 'mothership-stream-v1.schema.json', + $schema: 'https://json-schema.org/draft/2020-12/schema', + description: 'Shared execution-oriented mothership stream contract 
from Go to Sim.', + oneOf: [ { - "$ref": "#/$defs/MothershipStreamV1SessionStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionChatEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionChatEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionTitleEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionTitleEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SessionTraceEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SessionTraceEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1TextEventEnvelope" + $ref: '#/$defs/MothershipStreamV1TextEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolCallEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolCallEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolArgsDeltaEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ToolResultEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ToolResultEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentSpanStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentSpanEndEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope" + $ref: '#/$defs/MothershipStreamV1StructuredResultSpanEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope" + $ref: '#/$defs/MothershipStreamV1SubagentResultSpanEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ResourceUpsertEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ResourceRemoveEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope" + $ref: 
'#/$defs/MothershipStreamV1CheckpointPauseEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1RunResumedEventEnvelope" + $ref: '#/$defs/MothershipStreamV1RunResumedEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompactionStartEventEnvelope" + $ref: '#/$defs/MothershipStreamV1CompactionStartEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompactionDoneEventEnvelope" + $ref: '#/$defs/MothershipStreamV1CompactionDoneEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1ErrorEventEnvelope" + $ref: '#/$defs/MothershipStreamV1ErrorEventEnvelope', }, { - "$ref": "#/$defs/MothershipStreamV1CompleteEventEnvelope" - } + $ref: '#/$defs/MothershipStreamV1CompleteEventEnvelope', + }, ], - "title": "MothershipStreamV1EventEnvelope" + title: 'MothershipStreamV1EventEnvelope', } diff --git a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts index 95d86b3823d..ef7f2e065fb 100644 --- a/apps/sim/lib/copilot/generated/mothership-stream-v1.ts +++ b/apps/sim/lib/copilot/generated/mothership-stream-v1.ts @@ -24,512 +24,534 @@ export type MothershipStreamV1EventEnvelope = | MothershipStreamV1CompactionStartEventEnvelope | MothershipStreamV1CompactionDoneEventEnvelope | MothershipStreamV1ErrorEventEnvelope - | MothershipStreamV1CompleteEventEnvelope; -export type MothershipStreamV1TextChannel = "assistant" | "thinking"; -export type MothershipStreamV1ToolExecutor = "go" | "sim" | "client"; -export type MothershipStreamV1ToolMode = "sync" | "async"; + | MothershipStreamV1CompleteEventEnvelope +export type MothershipStreamV1TextChannel = 'assistant' | 'thinking' +export type MothershipStreamV1ToolExecutor = 'go' | 'sim' | 'client' +export type MothershipStreamV1ToolMode = 'sync' | 'async' export type MothershipStreamV1ToolStatus = - | "generating" - | "executing" - | "success" - | "error" - | "cancelled" - | "skipped" - | "rejected"; -export type MothershipStreamV1CompletionStatus = "complete" | 
"error" | "cancelled"; + | 'generating' + | 'executing' + | 'success' + | 'error' + | 'cancelled' + | 'skipped' + | 'rejected' +export type MothershipStreamV1CompletionStatus = 'complete' | 'error' | 'cancelled' export interface MothershipStreamV1SessionStartEventEnvelope { - payload: MothershipStreamV1SessionStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionStartPayload { - data?: MothershipStreamV1SessionStartData; - kind: "start"; + data?: MothershipStreamV1SessionStartData + kind: 'start' } export interface MothershipStreamV1SessionStartData { - responseId?: string; + responseId?: string } export interface MothershipStreamV1StreamScope { - agentId?: string; - lane: "subagent"; - parentToolCallId?: string; + agentId?: string + lane: 'subagent' + parentToolCallId?: string } export interface MothershipStreamV1StreamRef { - chatId?: string; - cursor?: string; - streamId: string; + chatId?: string + cursor?: string + streamId: string } export interface MothershipStreamV1Trace { /** * OTel trace ID from the first Go ingress. May differ from requestId when Sim assigns the canonical request identity. 
*/ - goTraceId?: string; - requestId: string; - spanId?: string; + goTraceId?: string + requestId: string + spanId?: string } export interface MothershipStreamV1SessionChatEventEnvelope { - payload: MothershipStreamV1SessionChatPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionChatPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionChatPayload { - chatId: string; - kind: "chat"; + chatId: string + kind: 'chat' } export interface MothershipStreamV1SessionTitleEventEnvelope { - payload: MothershipStreamV1SessionTitlePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionTitlePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionTitlePayload { - kind: "title"; - title: string; + kind: 'title' + title: string } export interface MothershipStreamV1SessionTraceEventEnvelope { - payload: MothershipStreamV1SessionTracePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "session"; - v: 1; + payload: MothershipStreamV1SessionTracePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'session' + v: 1 } export interface MothershipStreamV1SessionTracePayload { - kind: "trace"; - requestId: string; - spanId?: string; + kind: 'trace' 
+ requestId: string + spanId?: string } export interface MothershipStreamV1TextEventEnvelope { - payload: MothershipStreamV1TextPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "text"; - v: 1; + payload: MothershipStreamV1TextPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'text' + v: 1 } export interface MothershipStreamV1TextPayload { - channel: MothershipStreamV1TextChannel; - text: string; + channel: MothershipStreamV1TextChannel + text: string } export interface MothershipStreamV1ToolCallEventEnvelope { - payload: MothershipStreamV1ToolCallDescriptor; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolCallDescriptor + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolCallDescriptor { - arguments?: MothershipStreamV1AdditionalPropertiesMap; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - partial?: boolean; - phase: "call"; - requiresConfirmation?: boolean; - status?: MothershipStreamV1ToolStatus; - toolCallId: string; - toolName: string; - ui?: MothershipStreamV1ToolUI; + arguments?: MothershipStreamV1AdditionalPropertiesMap + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + partial?: boolean + phase: 'call' + requiresConfirmation?: boolean + status?: MothershipStreamV1ToolStatus + toolCallId: string + toolName: string + ui?: MothershipStreamV1ToolUI } export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown; + [k: string]: unknown } export interface 
MothershipStreamV1ToolUI { - clientExecutable?: boolean; - hidden?: boolean; - icon?: string; - internal?: boolean; - phaseLabel?: string; - requiresConfirmation?: boolean; - title?: string; + clientExecutable?: boolean + hidden?: boolean + icon?: string + internal?: boolean + phaseLabel?: string + requiresConfirmation?: boolean + title?: string } export interface MothershipStreamV1ToolArgsDeltaEventEnvelope { - payload: MothershipStreamV1ToolArgsDeltaPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolArgsDeltaPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolArgsDeltaPayload { - argumentsDelta: string; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - phase: "args_delta"; - toolCallId: string; - toolName: string; + argumentsDelta: string + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + phase: 'args_delta' + toolCallId: string + toolName: string } export interface MothershipStreamV1ToolResultEventEnvelope { - payload: MothershipStreamV1ToolResultPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "tool"; - v: 1; + payload: MothershipStreamV1ToolResultPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'tool' + v: 1 } export interface MothershipStreamV1ToolResultPayload { - error?: string; - executor: MothershipStreamV1ToolExecutor; - mode: MothershipStreamV1ToolMode; - output?: unknown; - phase: "result"; - status?: MothershipStreamV1ToolStatus; - success: boolean; - toolCallId: 
string; - toolName: string; + error?: string + executor: MothershipStreamV1ToolExecutor + mode: MothershipStreamV1ToolMode + output?: unknown + phase: 'result' + status?: MothershipStreamV1ToolStatus + success: boolean + toolCallId: string + toolName: string } export interface MothershipStreamV1SubagentSpanStartEventEnvelope { - payload: MothershipStreamV1SubagentSpanStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentSpanStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentSpanStartPayload { - agent?: string; - data?: unknown; - event: "start"; - kind: "subagent"; + agent?: string + data?: unknown + event: 'start' + kind: 'subagent' } export interface MothershipStreamV1SubagentSpanEndEventEnvelope { - payload: MothershipStreamV1SubagentSpanEndPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentSpanEndPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentSpanEndPayload { - agent?: string; - data?: unknown; - event: "end"; - kind: "subagent"; + agent?: string + data?: unknown + event: 'end' + kind: 'subagent' } export interface MothershipStreamV1StructuredResultSpanEventEnvelope { - payload: MothershipStreamV1StructuredResultSpanPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: 
MothershipStreamV1StructuredResultSpanPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1StructuredResultSpanPayload { - agent?: string; - data?: unknown; - kind: "structured_result"; + agent?: string + data?: unknown + kind: 'structured_result' } export interface MothershipStreamV1SubagentResultSpanEventEnvelope { - payload: MothershipStreamV1SubagentResultSpanPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "span"; - v: 1; + payload: MothershipStreamV1SubagentResultSpanPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'span' + v: 1 } export interface MothershipStreamV1SubagentResultSpanPayload { - agent?: string; - data?: unknown; - kind: "subagent_result"; + agent?: string + data?: unknown + kind: 'subagent_result' } export interface MothershipStreamV1ResourceUpsertEventEnvelope { - payload: MothershipStreamV1ResourceUpsertPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "resource"; - v: 1; + payload: MothershipStreamV1ResourceUpsertPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'resource' + v: 1 } export interface MothershipStreamV1ResourceUpsertPayload { - op: "upsert"; - resource: MothershipStreamV1ResourceDescriptor; + op: 'upsert' + resource: MothershipStreamV1ResourceDescriptor } export interface MothershipStreamV1ResourceDescriptor { - id: string; - title?: string; - type: string; + id: string + title?: string + type: string } export interface 
MothershipStreamV1ResourceRemoveEventEnvelope { - payload: MothershipStreamV1ResourceRemovePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "resource"; - v: 1; + payload: MothershipStreamV1ResourceRemovePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'resource' + v: 1 } export interface MothershipStreamV1ResourceRemovePayload { - op: "remove"; - resource: MothershipStreamV1ResourceDescriptor; + op: 'remove' + resource: MothershipStreamV1ResourceDescriptor } export interface MothershipStreamV1CheckpointPauseEventEnvelope { - payload: MothershipStreamV1CheckpointPausePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CheckpointPausePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CheckpointPausePayload { - checkpointId: string; - executionId: string; - frames?: MothershipStreamV1CheckpointPauseFrame[]; - kind: "checkpoint_pause"; - pendingToolCallIds: string[]; - runId: string; + checkpointId: string + executionId: string + frames?: MothershipStreamV1CheckpointPauseFrame[] + kind: 'checkpoint_pause' + pendingToolCallIds: string[] + runId: string } export interface MothershipStreamV1CheckpointPauseFrame { - parentToolCallId: string; - parentToolName: string; - pendingToolIds: string[]; + parentToolCallId: string + parentToolName: string + pendingToolIds: string[] } export interface MothershipStreamV1RunResumedEventEnvelope { - payload: MothershipStreamV1RunResumedPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: 
MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1RunResumedPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1RunResumedPayload { - kind: "resumed"; + kind: 'resumed' } export interface MothershipStreamV1CompactionStartEventEnvelope { - payload: MothershipStreamV1CompactionStartPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CompactionStartPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CompactionStartPayload { - kind: "compaction_start"; + kind: 'compaction_start' } export interface MothershipStreamV1CompactionDoneEventEnvelope { - payload: MothershipStreamV1CompactionDonePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "run"; - v: 1; + payload: MothershipStreamV1CompactionDonePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'run' + v: 1 } export interface MothershipStreamV1CompactionDonePayload { - data?: MothershipStreamV1CompactionDoneData; - kind: "compaction_done"; + data?: MothershipStreamV1CompactionDoneData + kind: 'compaction_done' } export interface MothershipStreamV1CompactionDoneData { - summary_chars: number; + summary_chars: number } export interface MothershipStreamV1ErrorEventEnvelope { - payload: MothershipStreamV1ErrorPayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: 
MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "error"; - v: 1; + payload: MothershipStreamV1ErrorPayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'error' + v: 1 } export interface MothershipStreamV1ErrorPayload { - code?: string; - data?: unknown; - displayMessage?: string; - error?: string; - message: string; - provider?: string; + code?: string + data?: unknown + displayMessage?: string + error?: string + message: string + provider?: string } export interface MothershipStreamV1CompleteEventEnvelope { - payload: MothershipStreamV1CompletePayload; - scope?: MothershipStreamV1StreamScope; - seq: number; - stream: MothershipStreamV1StreamRef; - trace?: MothershipStreamV1Trace; - ts: string; - type: "complete"; - v: 1; + payload: MothershipStreamV1CompletePayload + scope?: MothershipStreamV1StreamScope + seq: number + stream: MothershipStreamV1StreamRef + trace?: MothershipStreamV1Trace + ts: string + type: 'complete' + v: 1 } export interface MothershipStreamV1CompletePayload { - cost?: MothershipStreamV1CostData; - reason?: string; - response?: unknown; - status: MothershipStreamV1CompletionStatus; - usage?: MothershipStreamV1UsageData; + cost?: MothershipStreamV1CostData + reason?: string + response?: unknown + status: MothershipStreamV1CompletionStatus + usage?: MothershipStreamV1UsageData } export interface MothershipStreamV1CostData { - input?: number; - output?: number; - total?: number; + input?: number + output?: number + total?: number } export interface MothershipStreamV1UsageData { - cache_creation_input_tokens?: number; - cache_read_input_tokens?: number; - input_tokens?: number; - model?: string; - output_tokens?: number; - total_tokens?: number; + cache_creation_input_tokens?: number + cache_read_input_tokens?: number + input_tokens?: number + model?: string + output_tokens?: number + total_tokens?: number } 
-export type MothershipStreamV1AsyncToolRecordStatus = "pending" | "running" | "completed" | "failed" | "cancelled" | "delivered" +export type MothershipStreamV1AsyncToolRecordStatus = + | 'pending' + | 'running' + | 'completed' + | 'failed' + | 'cancelled' + | 'delivered' export const MothershipStreamV1AsyncToolRecordStatus = { - "pending": "pending", - "running": "running", - "completed": "completed", - "failed": "failed", - "cancelled": "cancelled", - "delivered": "delivered", -} as const; + pending: 'pending', + running: 'running', + completed: 'completed', + failed: 'failed', + cancelled: 'cancelled', + delivered: 'delivered', +} as const export const MothershipStreamV1CompletionStatus = { - "complete": "complete", - "error": "error", - "cancelled": "cancelled", -} as const; + complete: 'complete', + error: 'error', + cancelled: 'cancelled', +} as const -export type MothershipStreamV1EventType = "session" | "text" | "tool" | "span" | "resource" | "run" | "error" | "complete" +export type MothershipStreamV1EventType = + | 'session' + | 'text' + | 'tool' + | 'span' + | 'resource' + | 'run' + | 'error' + | 'complete' export const MothershipStreamV1EventType = { - "session": "session", - "text": "text", - "tool": "tool", - "span": "span", - "resource": "resource", - "run": "run", - "error": "error", - "complete": "complete", -} as const; + session: 'session', + text: 'text', + tool: 'tool', + span: 'span', + resource: 'resource', + run: 'run', + error: 'error', + complete: 'complete', +} as const -export type MothershipStreamV1ResourceOp = "upsert" | "remove" +export type MothershipStreamV1ResourceOp = 'upsert' | 'remove' export const MothershipStreamV1ResourceOp = { - "upsert": "upsert", - "remove": "remove", -} as const; + upsert: 'upsert', + remove: 'remove', +} as const -export type MothershipStreamV1RunKind = "checkpoint_pause" | "resumed" | "compaction_start" | "compaction_done" +export type MothershipStreamV1RunKind = + | 'checkpoint_pause' + | 'resumed' + 
| 'compaction_start' + | 'compaction_done' export const MothershipStreamV1RunKind = { - "checkpoint_pause": "checkpoint_pause", - "resumed": "resumed", - "compaction_start": "compaction_start", - "compaction_done": "compaction_done", -} as const; + checkpoint_pause: 'checkpoint_pause', + resumed: 'resumed', + compaction_start: 'compaction_start', + compaction_done: 'compaction_done', +} as const -export type MothershipStreamV1SessionKind = "trace" | "chat" | "title" | "start" +export type MothershipStreamV1SessionKind = 'trace' | 'chat' | 'title' | 'start' export const MothershipStreamV1SessionKind = { - "trace": "trace", - "chat": "chat", - "title": "title", - "start": "start", -} as const; + trace: 'trace', + chat: 'chat', + title: 'title', + start: 'start', +} as const -export type MothershipStreamV1SpanKind = "subagent" +export type MothershipStreamV1SpanKind = 'subagent' export const MothershipStreamV1SpanKind = { - "subagent": "subagent", -} as const; + subagent: 'subagent', +} as const -export type MothershipStreamV1SpanLifecycleEvent = "start" | "end" +export type MothershipStreamV1SpanLifecycleEvent = 'start' | 'end' export const MothershipStreamV1SpanLifecycleEvent = { - "start": "start", - "end": "end", -} as const; + start: 'start', + end: 'end', +} as const -export type MothershipStreamV1SpanPayloadKind = "subagent" | "structured_result" | "subagent_result" +export type MothershipStreamV1SpanPayloadKind = 'subagent' | 'structured_result' | 'subagent_result' export const MothershipStreamV1SpanPayloadKind = { - "subagent": "subagent", - "structured_result": "structured_result", - "subagent_result": "subagent_result", -} as const; + subagent: 'subagent', + structured_result: 'structured_result', + subagent_result: 'subagent_result', +} as const export const MothershipStreamV1TextChannel = { - "assistant": "assistant", - "thinking": "thinking", -} as const; + assistant: 'assistant', + thinking: 'thinking', +} as const export const 
MothershipStreamV1ToolExecutor = { - "go": "go", - "sim": "sim", - "client": "client", -} as const; + go: 'go', + sim: 'sim', + client: 'client', +} as const export const MothershipStreamV1ToolMode = { - "sync": "sync", - "async": "async", -} as const; + sync: 'sync', + async: 'async', +} as const -export type MothershipStreamV1ToolOutcome = "success" | "error" | "cancelled" | "skipped" | "rejected" +export type MothershipStreamV1ToolOutcome = + | 'success' + | 'error' + | 'cancelled' + | 'skipped' + | 'rejected' export const MothershipStreamV1ToolOutcome = { - "success": "success", - "error": "error", - "cancelled": "cancelled", - "skipped": "skipped", - "rejected": "rejected", -} as const; + success: 'success', + error: 'error', + cancelled: 'cancelled', + skipped: 'skipped', + rejected: 'rejected', +} as const -export type MothershipStreamV1ToolPhase = "call" | "args_delta" | "result" +export type MothershipStreamV1ToolPhase = 'call' | 'args_delta' | 'result' export const MothershipStreamV1ToolPhase = { - "call": "call", - "args_delta": "args_delta", - "result": "result", -} as const; + call: 'call', + args_delta: 'args_delta', + result: 'result', +} as const export const MothershipStreamV1ToolStatus = { - "generating": "generating", - "executing": "executing", - "success": "success", - "error": "error", - "cancelled": "cancelled", - "skipped": "skipped", - "rejected": "rejected", -} as const; - + generating: 'generating', + executing: 'executing', + success: 'success', + error: 'error', + cancelled: 'cancelled', + skipped: 'skipped', + rejected: 'rejected', +} as const diff --git a/apps/sim/lib/copilot/generated/request-trace-v1.ts b/apps/sim/lib/copilot/generated/request-trace-v1.ts index d1cd137dfbc..31a60bb5159 100644 --- a/apps/sim/lib/copilot/generated/request-trace-v1.ts +++ b/apps/sim/lib/copilot/generated/request-trace-v1.ts @@ -5,130 +5,131 @@ * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` 
"RequestTraceV1Outcome". */ -export type RequestTraceV1Outcome = "success" | "error" | "cancelled"; +export type RequestTraceV1Outcome = 'success' | 'error' | 'cancelled' /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SpanSource". */ -export type RequestTraceV1SpanSource = "sim" | "go"; +export type RequestTraceV1SpanSource = 'sim' | 'go' /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SpanStatus". */ -export type RequestTraceV1SpanStatus = "ok" | "error" | "cancelled" | "pending"; +export type RequestTraceV1SpanStatus = 'ok' | 'error' | 'cancelled' | 'pending' /** * Trace report sent from Sim to Go after a request completes. */ export interface RequestTraceV1SimReport { - chatId?: string; - cost?: RequestTraceV1CostSummary; - durationMs: number; - endMs: number; - executionId?: string; - goTraceId?: string; - outcome: RequestTraceV1Outcome; - runId?: string; - simRequestId: string; - spans: RequestTraceV1Span[]; - startMs: number; - streamId?: string; - usage?: RequestTraceV1UsageSummary; + chatId?: string + cost?: RequestTraceV1CostSummary + durationMs: number + endMs: number + executionId?: string + goTraceId?: string + outcome: RequestTraceV1Outcome + runId?: string + simRequestId: string + spans: RequestTraceV1Span[] + startMs: number + streamId?: string + usage?: RequestTraceV1UsageSummary + userMessage?: string } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1CostSummary". */ export interface RequestTraceV1CostSummary { - billedTotalCost?: number; - rawTotalCost?: number; + billedTotalCost?: number + rawTotalCost?: number } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1Span". 
*/ export interface RequestTraceV1Span { - attributes?: MothershipStreamV1AdditionalPropertiesMap; - durationMs: number; - endMs: number; - kind?: string; - name: string; - parentName?: string; - source?: RequestTraceV1SpanSource; - startMs: number; - status: RequestTraceV1SpanStatus; + attributes?: MothershipStreamV1AdditionalPropertiesMap + durationMs: number + endMs: number + kind?: string + name: string + parentName?: string + source?: RequestTraceV1SpanSource + startMs: number + status: RequestTraceV1SpanStatus } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "MothershipStreamV1AdditionalPropertiesMap". */ export interface MothershipStreamV1AdditionalPropertiesMap { - [k: string]: unknown; + [k: string]: unknown } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1UsageSummary". */ export interface RequestTraceV1UsageSummary { - cacheReadTokens?: number; - cacheWriteTokens?: number; - inputTokens?: number; - outputTokens?: number; + cacheReadTokens?: number + cacheWriteTokens?: number + inputTokens?: number + outputTokens?: number } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1MergedTrace". 
*/ export interface RequestTraceV1MergedTrace { - chatId?: string; - cost?: RequestTraceV1CostSummary; - durationMs: number; - endMs: number; - goTraceId: string; - outcome: RequestTraceV1Outcome; - serviceCharges?: MothershipStreamV1AdditionalPropertiesMap; - simRequestId?: string; - spans: RequestTraceV1Span[]; - startMs: number; - streamId?: string; - usage?: RequestTraceV1UsageSummary; - userId?: string; + chatId?: string + cost?: RequestTraceV1CostSummary + durationMs: number + endMs: number + goTraceId: string + outcome: RequestTraceV1Outcome + serviceCharges?: MothershipStreamV1AdditionalPropertiesMap + simRequestId?: string + spans: RequestTraceV1Span[] + startMs: number + streamId?: string + usage?: RequestTraceV1UsageSummary + userId?: string } /** * This interface was referenced by `RequestTraceV1SimReport`'s JSON-Schema * via the `definition` "RequestTraceV1SimReport". */ export interface RequestTraceV1SimReport1 { - chatId?: string; - cost?: RequestTraceV1CostSummary; - durationMs: number; - endMs: number; - executionId?: string; - goTraceId?: string; - outcome: RequestTraceV1Outcome; - runId?: string; - simRequestId: string; - spans: RequestTraceV1Span[]; - startMs: number; - streamId?: string; - usage?: RequestTraceV1UsageSummary; + chatId?: string + cost?: RequestTraceV1CostSummary + durationMs: number + endMs: number + executionId?: string + goTraceId?: string + outcome: RequestTraceV1Outcome + runId?: string + simRequestId: string + spans: RequestTraceV1Span[] + startMs: number + streamId?: string + usage?: RequestTraceV1UsageSummary + userMessage?: string } export const RequestTraceV1Outcome = { - "success": "success", - "error": "error", - "cancelled": "cancelled", -} as const; + success: 'success', + error: 'error', + cancelled: 'cancelled', +} as const export const RequestTraceV1SpanSource = { - "sim": "sim", - "go": "go", -} as const; + sim: 'sim', + go: 'go', +} as const export const RequestTraceV1SpanStatus = { - "ok": "ok", - "error": 
"error", - "cancelled": "cancelled", - "pending": "pending", -} as const; - + ok: 'ok', + error: 'error', + cancelled: 'cancelled', + pending: 'pending', +} as const diff --git a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts index cfc93ef496d..2af7b1d660e 100644 --- a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts @@ -3,838 +3,3107 @@ // export interface ToolCatalogEntry { - clientExecutable?: boolean; - hidden?: boolean; - id: "agent" | "auth" | "check_deployment_status" | "complete_job" | "context_write" | "crawl_website" | "create_file" | "create_folder" | "create_job" | "create_workflow" | "create_workspace_mcp_server" | "debug" | "delete_file" | "delete_folder" | "delete_workflow" | "delete_workspace_mcp_server" | "deploy" | "deploy_api" | "deploy_chat" | "deploy_mcp" | "download_to_workspace_file" | "edit_content" | "edit_workflow" | "file" | "function_execute" | "generate_api_key" | "generate_image" | "generate_visualization" | "get_block_outputs" | "get_block_upstream_references" | "get_deployed_workflow_state" | "get_deployment_version" | "get_execution_summary" | "get_job_logs" | "get_page_contents" | "get_platform_actions" | "get_workflow_data" | "get_workflow_logs" | "glob" | "grep" | "job" | "knowledge" | "knowledge_base" | "list_folders" | "list_user_workspaces" | "list_workspace_mcp_servers" | "manage_credential" | "manage_custom_tool" | "manage_job" | "manage_mcp_tool" | "manage_skill" | "materialize_file" | "move_folder" | "move_workflow" | "oauth_get_auth_link" | "oauth_request_access" | "open_resource" | "read" | "redeploy" | "rename_file" | "rename_workflow" | "research" | "respond" | "restore_resource" | "revert_to_version" | "run" | "run_block" | "run_from_block" | "run_workflow" | "run_workflow_until_block" | "scrape_page" | "search_documentation" | "search_library_docs" | "search_online" | "search_patterns" | "set_block_enabled" 
| "set_environment_variables" | "set_global_workflow_variables" | "superagent" | "table" | "tool_search_tool_regex" | "update_job_history" | "update_workspace_mcp_server" | "user_memory" | "user_table" | "workflow" | "workspace_file"; - internal?: boolean; - mode: "async" | "sync"; - name: "agent" | "auth" | "check_deployment_status" | "complete_job" | "context_write" | "crawl_website" | "create_file" | "create_folder" | "create_job" | "create_workflow" | "create_workspace_mcp_server" | "debug" | "delete_file" | "delete_folder" | "delete_workflow" | "delete_workspace_mcp_server" | "deploy" | "deploy_api" | "deploy_chat" | "deploy_mcp" | "download_to_workspace_file" | "edit_content" | "edit_workflow" | "file" | "function_execute" | "generate_api_key" | "generate_image" | "generate_visualization" | "get_block_outputs" | "get_block_upstream_references" | "get_deployed_workflow_state" | "get_deployment_version" | "get_execution_summary" | "get_job_logs" | "get_page_contents" | "get_platform_actions" | "get_workflow_data" | "get_workflow_logs" | "glob" | "grep" | "job" | "knowledge" | "knowledge_base" | "list_folders" | "list_user_workspaces" | "list_workspace_mcp_servers" | "manage_credential" | "manage_custom_tool" | "manage_job" | "manage_mcp_tool" | "manage_skill" | "materialize_file" | "move_folder" | "move_workflow" | "oauth_get_auth_link" | "oauth_request_access" | "open_resource" | "read" | "redeploy" | "rename_file" | "rename_workflow" | "research" | "respond" | "restore_resource" | "revert_to_version" | "run" | "run_block" | "run_from_block" | "run_workflow" | "run_workflow_until_block" | "scrape_page" | "search_documentation" | "search_library_docs" | "search_online" | "search_patterns" | "set_block_enabled" | "set_environment_variables" | "set_global_workflow_variables" | "superagent" | "table" | "tool_search_tool_regex" | "update_job_history" | "update_workspace_mcp_server" | "user_memory" | "user_table" | "workflow" | "workspace_file"; - parameters: 
unknown; - requiredPermission?: "admin" | "write"; - requiresConfirmation?: boolean; - resultSchema?: unknown; - route: "client" | "go" | "sim" | "subagent"; - subagentId?: "agent" | "auth" | "debug" | "deploy" | "file" | "job" | "knowledge" | "research" | "run" | "superagent" | "table" | "workflow"; + clientExecutable?: boolean + hidden?: boolean + id: + | 'agent' + | 'auth' + | 'check_deployment_status' + | 'complete_job' + | 'context_write' + | 'crawl_website' + | 'create_file' + | 'create_folder' + | 'create_job' + | 'create_workflow' + | 'create_workspace_mcp_server' + | 'debug' + | 'delete_file' + | 'delete_folder' + | 'delete_workflow' + | 'delete_workspace_mcp_server' + | 'deploy' + | 'deploy_api' + | 'deploy_chat' + | 'deploy_mcp' + | 'download_to_workspace_file' + | 'edit_content' + | 'edit_workflow' + | 'file' + | 'function_execute' + | 'generate_api_key' + | 'generate_image' + | 'generate_visualization' + | 'get_block_outputs' + | 'get_block_upstream_references' + | 'get_deployed_workflow_state' + | 'get_deployment_version' + | 'get_execution_summary' + | 'get_job_logs' + | 'get_page_contents' + | 'get_platform_actions' + | 'get_workflow_data' + | 'get_workflow_logs' + | 'glob' + | 'grep' + | 'job' + | 'knowledge' + | 'knowledge_base' + | 'list_folders' + | 'list_user_workspaces' + | 'list_workspace_mcp_servers' + | 'manage_credential' + | 'manage_custom_tool' + | 'manage_job' + | 'manage_mcp_tool' + | 'manage_skill' + | 'materialize_file' + | 'move_folder' + | 'move_workflow' + | 'oauth_get_auth_link' + | 'oauth_request_access' + | 'open_resource' + | 'read' + | 'redeploy' + | 'rename_file' + | 'rename_workflow' + | 'research' + | 'respond' + | 'restore_resource' + | 'revert_to_version' + | 'run' + | 'run_block' + | 'run_from_block' + | 'run_workflow' + | 'run_workflow_until_block' + | 'scrape_page' + | 'search_documentation' + | 'search_library_docs' + | 'search_online' + | 'search_patterns' + | 'set_block_enabled' + | 'set_environment_variables' + | 
'set_global_workflow_variables' + | 'superagent' + | 'table' + | 'tool_search_tool_regex' + | 'update_job_history' + | 'update_workspace_mcp_server' + | 'user_memory' + | 'user_table' + | 'workflow' + | 'workspace_file' + internal?: boolean + mode: 'async' | 'sync' + name: + | 'agent' + | 'auth' + | 'check_deployment_status' + | 'complete_job' + | 'context_write' + | 'crawl_website' + | 'create_file' + | 'create_folder' + | 'create_job' + | 'create_workflow' + | 'create_workspace_mcp_server' + | 'debug' + | 'delete_file' + | 'delete_folder' + | 'delete_workflow' + | 'delete_workspace_mcp_server' + | 'deploy' + | 'deploy_api' + | 'deploy_chat' + | 'deploy_mcp' + | 'download_to_workspace_file' + | 'edit_content' + | 'edit_workflow' + | 'file' + | 'function_execute' + | 'generate_api_key' + | 'generate_image' + | 'generate_visualization' + | 'get_block_outputs' + | 'get_block_upstream_references' + | 'get_deployed_workflow_state' + | 'get_deployment_version' + | 'get_execution_summary' + | 'get_job_logs' + | 'get_page_contents' + | 'get_platform_actions' + | 'get_workflow_data' + | 'get_workflow_logs' + | 'glob' + | 'grep' + | 'job' + | 'knowledge' + | 'knowledge_base' + | 'list_folders' + | 'list_user_workspaces' + | 'list_workspace_mcp_servers' + | 'manage_credential' + | 'manage_custom_tool' + | 'manage_job' + | 'manage_mcp_tool' + | 'manage_skill' + | 'materialize_file' + | 'move_folder' + | 'move_workflow' + | 'oauth_get_auth_link' + | 'oauth_request_access' + | 'open_resource' + | 'read' + | 'redeploy' + | 'rename_file' + | 'rename_workflow' + | 'research' + | 'respond' + | 'restore_resource' + | 'revert_to_version' + | 'run' + | 'run_block' + | 'run_from_block' + | 'run_workflow' + | 'run_workflow_until_block' + | 'scrape_page' + | 'search_documentation' + | 'search_library_docs' + | 'search_online' + | 'search_patterns' + | 'set_block_enabled' + | 'set_environment_variables' + | 'set_global_workflow_variables' + | 'superagent' + | 'table' + | 
'tool_search_tool_regex' + | 'update_job_history' + | 'update_workspace_mcp_server' + | 'user_memory' + | 'user_table' + | 'workflow' + | 'workspace_file' + parameters: unknown + requiredPermission?: 'admin' | 'write' + requiresConfirmation?: boolean + resultSchema?: unknown + route: 'client' | 'go' | 'sim' | 'subagent' + subagentId?: + | 'agent' + | 'auth' + | 'debug' + | 'deploy' + | 'file' + | 'job' + | 'knowledge' + | 'research' + | 'run' + | 'superagent' + | 'table' + | 'workflow' } export const Agent: ToolCatalogEntry = { - id: "agent", - name: "agent", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"What tool/skill/MCP action is needed.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "agent", + id: 'agent', + name: 'agent', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + request: { description: 'What tool/skill/MCP action is needed.', type: 'string' }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'agent', internal: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const Auth: ToolCatalogEntry = { - id: "auth", - name: "auth", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"What authentication/credential action is needed.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "auth", + id: 'auth', + name: 'auth', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + request: { description: 'What authentication/credential action is needed.', type: 'string' }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'auth', internal: true, -}; +} export const CheckDeploymentStatus: ToolCatalogEntry = { - id: "check_deployment_status", - name: "check_deployment_status", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Workflow ID to check (defaults to 
current workflow)"}}}, -}; + id: 'check_deployment_status', + name: 'check_deployment_status', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Workflow ID to check (defaults to current workflow)', + }, + }, + }, +} export const CompleteJob: ToolCatalogEntry = { - id: "complete_job", - name: "complete_job", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"jobId":{"type":"string","description":"The ID of the job to mark as completed."}},"required":["jobId"]}, -}; + id: 'complete_job', + name: 'complete_job', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + jobId: { type: 'string', description: 'The ID of the job to mark as completed.' }, + }, + required: ['jobId'], + }, +} export const ContextWrite: ToolCatalogEntry = { - id: "context_write", - name: "context_write", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"content":{"type":"string","description":"Full content to write to the file (replaces existing content)"},"file_path":{"type":"string","description":"Path of the file to write (e.g. 'SESSION.md')"}},"required":["file_path","content"]}, -}; + id: 'context_write', + name: 'context_write', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + content: { + type: 'string', + description: 'Full content to write to the file (replaces existing content)', + }, + file_path: { type: 'string', description: "Path of the file to write (e.g. 
'SESSION.md')" }, + }, + required: ['file_path', 'content'], + }, +} export const CrawlWebsite: ToolCatalogEntry = { - id: "crawl_website", - name: "crawl_website", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"exclude_paths":{"type":"array","description":"Skip URLs matching these patterns","items":{"type":"string"}},"include_paths":{"type":"array","description":"Only crawl URLs matching these patterns","items":{"type":"string"}},"limit":{"type":"number","description":"Maximum pages to crawl (default 10, max 50)"},"max_depth":{"type":"number","description":"How deep to follow links (default 2)"},"url":{"type":"string","description":"Starting URL to crawl from"}},"required":["url"]}, -}; + id: 'crawl_website', + name: 'crawl_website', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + exclude_paths: { + type: 'array', + description: 'Skip URLs matching these patterns', + items: { type: 'string' }, + }, + include_paths: { + type: 'array', + description: 'Only crawl URLs matching these patterns', + items: { type: 'string' }, + }, + limit: { type: 'number', description: 'Maximum pages to crawl (default 10, max 50)' }, + max_depth: { type: 'number', description: 'How deep to follow links (default 2)' }, + url: { type: 'string', description: 'Starting URL to crawl from' }, + }, + required: ['url'], + }, +} export const CreateFile: ToolCatalogEntry = { - id: "create_file", - name: "create_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"contentType":{"type":"string","description":"Optional MIME type override. Usually omit and let the system infer from the file extension."},"fileName":{"type":"string","description":"Plain workspace filename including extension, e.g. \"main.py\" or \"report.md\". 
Must not contain slashes."}},"required":["fileName"]}, - resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Contains id (the fileId) and name."},"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the file was created."}},"required":["success","message"]}, - requiredPermission: "write", -}; + id: 'create_file', + name: 'create_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + contentType: { + type: 'string', + description: + 'Optional MIME type override. Usually omit and let the system infer from the file extension.', + }, + fileName: { + type: 'string', + description: + 'Plain workspace filename including extension, e.g. "main.py" or "report.md". Must not contain slashes.', + }, + }, + required: ['fileName'], + }, + resultSchema: { + type: 'object', + properties: { + data: { type: 'object', description: 'Contains id (the fileId) and name.' }, + message: { type: 'string', description: 'Human-readable outcome.' }, + success: { type: 'boolean', description: 'Whether the file was created.' }, + }, + required: ['success', 'message'], + }, + requiredPermission: 'write', +} export const CreateFolder: ToolCatalogEntry = { - id: "create_folder", - name: "create_folder", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"name":{"type":"string","description":"Folder name."},"parentId":{"type":"string","description":"Optional parent folder ID."},"workspaceId":{"type":"string","description":"Optional workspace ID."}},"required":["name"]}, - requiredPermission: "write", -}; + id: 'create_folder', + name: 'create_folder', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + name: { type: 'string', description: 'Folder name.' }, + parentId: { type: 'string', description: 'Optional parent folder ID.' }, + workspaceId: { type: 'string', description: 'Optional workspace ID.' 
}, + }, + required: ['name'], + }, + requiredPermission: 'write', +} export const CreateJob: ToolCatalogEntry = { - id: "create_job", - name: "create_job", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"cron":{"type":"string","description":"Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs."},"lifecycle":{"type":"string","description":"'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.","enum":["persistent","until_complete"]},"maxRuns":{"type":"integer","description":"Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling."},"prompt":{"type":"string","description":"The prompt to execute when the job fires. This is sent to the Mothership as a user message."},"successCondition":{"type":"string","description":"What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email')."},"time":{"type":"string","description":"ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter."},"timezone":{"type":"string","description":"IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC."},"title":{"type":"string","description":"A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name."}},"required":["title","prompt"]}, -}; + id: 'create_job', + name: 'create_job', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + cron: { + type: 'string', + description: + "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). 
Omit for one-time jobs.", + }, + lifecycle: { + type: 'string', + description: + "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", + enum: ['persistent', 'until_complete'], + }, + maxRuns: { + type: 'integer', + description: + 'Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling.', + }, + prompt: { + type: 'string', + description: + 'The prompt to execute when the job fires. This is sent to the Mothership as a user message.', + }, + successCondition: { + type: 'string', + description: + "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email').", + }, + time: { + type: 'string', + description: + "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter.", + }, + timezone: { + type: 'string', + description: + "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC.", + }, + title: { + type: 'string', + description: + "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). 
Used as the display name.", + }, + }, + required: ['title', 'prompt'], + }, +} export const CreateWorkflow: ToolCatalogEntry = { - id: "create_workflow", - name: "create_workflow", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"description":{"type":"string","description":"Optional workflow description."},"folderId":{"type":"string","description":"Optional folder ID."},"name":{"type":"string","description":"Workflow name."},"workspaceId":{"type":"string","description":"Optional workspace ID."}},"required":["name"]}, - requiredPermission: "write", -}; + id: 'create_workflow', + name: 'create_workflow', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + description: { type: 'string', description: 'Optional workflow description.' }, + folderId: { type: 'string', description: 'Optional folder ID.' }, + name: { type: 'string', description: 'Workflow name.' }, + workspaceId: { type: 'string', description: 'Optional workspace ID.' 
}, + }, + required: ['name'], + }, + requiredPermission: 'write', +} export const CreateWorkspaceMcpServer: ToolCatalogEntry = { - id: "create_workspace_mcp_server", - name: "create_workspace_mcp_server", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"description":{"type":"string","description":"Optional description for the server"},"name":{"type":"string","description":"Required: server name"},"workspaceId":{"type":"string","description":"Workspace ID (defaults to current workspace)"}},"required":["name"]}, + id: 'create_workspace_mcp_server', + name: 'create_workspace_mcp_server', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + description: { type: 'string', description: 'Optional description for the server' }, + name: { type: 'string', description: 'Required: server name' }, + workspaceId: { type: 'string', description: 'Workspace ID (defaults to current workspace)' }, + }, + required: ['name'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const Debug: ToolCatalogEntry = { - id: "debug", - name: "debug", - route: "subagent", - mode: "async", - parameters: {"properties":{"context":{"description":"Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.","type":"string"},"request":{"description":"What to debug. Include error messages, block IDs, and any context about the failure.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "debug", + id: 'debug', + name: 'debug', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + context: { + description: + 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', + type: 'string', + }, + request: { + description: + 'What to debug. 
Include error messages, block IDs, and any context about the failure.', + type: 'string', + }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'debug', internal: true, -}; +} export const DeleteFile: ToolCatalogEntry = { - id: "delete_file", - name: "delete_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"fileIds":{"type":"array","description":"Canonical workspace file IDs of the files to delete.","items":{"type":"string"}}},"required":["fileIds"]}, - resultSchema: {"type":"object","properties":{"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the delete succeeded."}},"required":["success","message"]}, - requiredPermission: "write", -}; + id: 'delete_file', + name: 'delete_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + fileIds: { + type: 'array', + description: 'Canonical workspace file IDs of the files to delete.', + items: { type: 'string' }, + }, + }, + required: ['fileIds'], + }, + resultSchema: { + type: 'object', + properties: { + message: { type: 'string', description: 'Human-readable outcome.' }, + success: { type: 'boolean', description: 'Whether the delete succeeded.' 
}, + }, + required: ['success', 'message'], + }, + requiredPermission: 'write', +} export const DeleteFolder: ToolCatalogEntry = { - id: "delete_folder", - name: "delete_folder", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"folderIds":{"type":"array","description":"The folder IDs to delete.","items":{"type":"string"}}},"required":["folderIds"]}, + id: 'delete_folder', + name: 'delete_folder', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + folderIds: { + type: 'array', + description: 'The folder IDs to delete.', + items: { type: 'string' }, + }, + }, + required: ['folderIds'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const DeleteWorkflow: ToolCatalogEntry = { - id: "delete_workflow", - name: "delete_workflow", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workflowIds":{"type":"array","description":"The workflow IDs to delete.","items":{"type":"string"}}},"required":["workflowIds"]}, + id: 'delete_workflow', + name: 'delete_workflow', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workflowIds: { + type: 'array', + description: 'The workflow IDs to delete.', + items: { type: 'string' }, + }, + }, + required: ['workflowIds'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const DeleteWorkspaceMcpServer: ToolCatalogEntry = { - id: "delete_workspace_mcp_server", - name: "delete_workspace_mcp_server", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"serverId":{"type":"string","description":"Required: the MCP server ID to delete"}},"required":["serverId"]}, + id: 'delete_workspace_mcp_server', + name: 'delete_workspace_mcp_server', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + serverId: { type: 'string', description: 'Required: the MCP 
server ID to delete' }, + }, + required: ['serverId'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const Deploy: ToolCatalogEntry = { - id: "deploy", - name: "deploy", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"Detailed deployment instructions. Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "deploy", + id: 'deploy', + name: 'deploy', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + request: { + description: + 'Detailed deployment instructions. Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).', + type: 'string', + }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'deploy', internal: true, -}; +} export const DeployApi: ToolCatalogEntry = { - id: "deploy_api", - name: "deploy_api", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"action":{"type":"string","description":"Whether to deploy or undeploy the API endpoint","enum":["deploy","undeploy"],"default":"deploy"},"workflowId":{"type":"string","description":"Workflow ID to deploy (required in workspace context)"}}}, - resultSchema: {"type":"object","properties":{"apiEndpoint":{"type":"string","description":"Canonical workflow execution endpoint."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deployedAt":{"type":"string","description":"Deployment timestamp when the workflow is deployed."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. 
For API deploys this includes endpoint, auth, and sync/stream/async mode details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, such as api."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling."},"isDeployed":{"type":"boolean","description":"Whether the workflow API is currently deployed after this tool call."},"version":{"type":"number","description":"Deployment version for the current API deployment."},"workflowId":{"type":"string","description":"Workflow ID that was deployed or undeployed."}},"required":["workflowId","isDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, + id: 'deploy_api', + name: 'deploy_api', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + action: { + type: 'string', + description: 'Whether to deploy or undeploy the API endpoint', + enum: ['deploy', 'undeploy'], + default: 'deploy', + }, + workflowId: { + type: 'string', + description: 'Workflow ID to deploy (required in workspace context)', + }, + }, + }, + resultSchema: { + type: 'object', + properties: { + apiEndpoint: { type: 'string', description: 'Canonical workflow execution endpoint.' }, + baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, + deployedAt: { + type: 'string', + description: 'Deployment timestamp when the workflow is deployed.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. 
For API deploys this includes endpoint, auth, and sync/stream/async mode details.', + }, + deploymentStatus: { + type: 'object', + description: 'Structured per-surface deployment status keyed by surface name, such as api.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the workflow API is currently deployed after this tool call.', + }, + version: { + type: 'number', + description: 'Deployment version for the current API deployment.', + }, + workflowId: { type: 'string', description: 'Workflow ID that was deployed or undeployed.' }, + }, + required: [ + 'workflowId', + 'isDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const DeployChat: ToolCatalogEntry = { - id: "deploy_chat", - name: "deploy_chat", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"action":{"type":"string","description":"Whether to deploy or undeploy the chat interface","enum":["deploy","undeploy"],"default":"deploy"},"allowedEmails":{"type":"array","description":"List of allowed emails/domains for email or SSO auth","items":{"type":"string"}},"authType":{"type":"string","description":"Authentication type: public, password, email, or sso","enum":["public","password","email","sso"],"default":"public"},"description":{"type":"string","description":"Optional description for the chat"},"identifier":{"type":"string","description":"URL slug for the chat (lowercase letters, numbers, hyphens only)"},"outputConfigs":{"type":"array","description":"Output configurations specifying 
which block outputs to display in chat","items":{"type":"object","properties":{"blockId":{"type":"string","description":"The block UUID"},"path":{"type":"string","description":"The output path (e.g. 'response', 'response.content')"}},"required":["blockId","path"]}},"password":{"type":"string","description":"Password for password-protected chats"},"title":{"type":"string","description":"Display title for the chat interface"},"welcomeMessage":{"type":"string","description":"Welcome message shown to users"},"workflowId":{"type":"string","description":"Workflow ID to deploy (required in workspace context)"}}}, - resultSchema: {"type":"object","properties":{"action":{"type":"string","description":"Action performed by the tool, such as \"deploy\" or \"undeploy\"."},"apiEndpoint":{"type":"string","description":"Paired workflow execution endpoint used by the chat deployment."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"chatUrl":{"type":"string","description":"Shareable chat URL when the chat surface is deployed."},"deployedAt":{"type":"string","description":"Deployment timestamp for the underlying workflow deployment."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, including api and chat."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_chat this is always \"chat\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. 
Includes chat access details and API curl examples."},"identifier":{"type":"string","description":"Chat identifier or slug."},"isChatDeployed":{"type":"boolean","description":"Whether the chat surface is deployed after this tool call."},"isDeployed":{"type":"boolean","description":"Whether the paired API surface remains deployed after this tool call."},"success":{"type":"boolean","description":"Whether the deploy_chat action completed successfully."},"version":{"type":"number","description":"Deployment version for the underlying workflow deployment."},"workflowId":{"type":"string","description":"Workflow ID associated with the chat deployment."}},"required":["workflowId","success","action","isDeployed","isChatDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, + id: 'deploy_chat', + name: 'deploy_chat', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + action: { + type: 'string', + description: 'Whether to deploy or undeploy the chat interface', + enum: ['deploy', 'undeploy'], + default: 'deploy', + }, + allowedEmails: { + type: 'array', + description: 'List of allowed emails/domains for email or SSO auth', + items: { type: 'string' }, + }, + authType: { + type: 'string', + description: 'Authentication type: public, password, email, or sso', + enum: ['public', 'password', 'email', 'sso'], + default: 'public', + }, + description: { type: 'string', description: 'Optional description for the chat' }, + identifier: { + type: 'string', + description: 'URL slug for the chat (lowercase letters, numbers, hyphens only)', + }, + outputConfigs: { + type: 'array', + description: 'Output configurations specifying which block outputs to display in chat', + items: { + type: 'object', + properties: { + blockId: { type: 'string', description: 'The block UUID' }, + path: { + type: 'string', + description: "The output path (e.g. 
'response', 'response.content')", + }, + }, + required: ['blockId', 'path'], + }, + }, + password: { type: 'string', description: 'Password for password-protected chats' }, + title: { type: 'string', description: 'Display title for the chat interface' }, + welcomeMessage: { type: 'string', description: 'Welcome message shown to users' }, + workflowId: { + type: 'string', + description: 'Workflow ID to deploy (required in workspace context)', + }, + }, + }, + resultSchema: { + type: 'object', + properties: { + action: { + type: 'string', + description: 'Action performed by the tool, such as "deploy" or "undeploy".', + }, + apiEndpoint: { + type: 'string', + description: 'Paired workflow execution endpoint used by the chat deployment.', + }, + baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, + chatUrl: { + type: 'string', + description: 'Shareable chat URL when the chat surface is deployed.', + }, + deployedAt: { + type: 'string', + description: 'Deployment timestamp for the underlying workflow deployment.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration.', + }, + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, including api and chat.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_chat this is always "chat".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. Includes chat access details and API curl examples.', + }, + identifier: { type: 'string', description: 'Chat identifier or slug.' 
}, + isChatDeployed: { + type: 'boolean', + description: 'Whether the chat surface is deployed after this tool call.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the paired API surface remains deployed after this tool call.', + }, + success: { + type: 'boolean', + description: 'Whether the deploy_chat action completed successfully.', + }, + version: { + type: 'number', + description: 'Deployment version for the underlying workflow deployment.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID associated with the chat deployment.', + }, + }, + required: [ + 'workflowId', + 'success', + 'action', + 'isDeployed', + 'isChatDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const DeployMcp: ToolCatalogEntry = { - id: "deploy_mcp", - name: "deploy_mcp", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"parameterDescriptions":{"type":"array","description":"Array of parameter descriptions for the tool","items":{"type":"object","properties":{"description":{"type":"string","description":"Parameter description"},"name":{"type":"string","description":"Parameter name"}},"required":["name","description"]}},"serverId":{"type":"string","description":"Required: server ID from list_workspace_mcp_servers"},"toolDescription":{"type":"string","description":"Description for the MCP tool"},"toolName":{"type":"string","description":"Name for the MCP tool (defaults to workflow name)"},"workflowId":{"type":"string","description":"Workflow ID (defaults to active workflow)"}},"required":["serverId"]}, - resultSchema: {"type":"object","properties":{"action":{"type":"string","description":"Action performed by the tool, such as \"deploy\" or \"undeploy\"."},"apiEndpoint":{"type":"string","description":"Underlying workflow API endpoint associated with the MCP 
tool."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_mcp this is always \"mcp\"."},"examples":{"type":"object","description":"Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients."},"mcpServerUrl":{"type":"string","description":"HTTP MCP server URL to configure in clients."},"removed":{"type":"boolean","description":"Whether the MCP deployment was removed during an undeploy action."},"serverId":{"type":"string","description":"Workspace MCP server ID."},"serverName":{"type":"string","description":"Workspace MCP server name."},"toolDescription":{"type":"string","description":"MCP tool description exposed on the server."},"toolId":{"type":"string","description":"MCP tool ID when deployed."},"toolName":{"type":"string","description":"MCP tool name exposed on the server."},"updated":{"type":"boolean","description":"Whether an existing MCP tool deployment was updated instead of created."},"workflowId":{"type":"string","description":"Workflow ID associated with the MCP deployment."}},"required":["deploymentType","deploymentStatus"]}, + id: 'deploy_mcp', + name: 'deploy_mcp', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + parameterDescriptions: { + type: 'array', + description: 'Array of parameter descriptions for the tool', + items: { + type: 'object', + properties: { + description: { type: 'string', description: 'Parameter description' }, + name: { type: 'string', description: 'Parameter name' 
}, + }, + required: ['name', 'description'], + }, + }, + serverId: { + type: 'string', + description: 'Required: server ID from list_workspace_mcp_servers', + }, + toolDescription: { type: 'string', description: 'Description for the MCP tool' }, + toolName: { + type: 'string', + description: 'Name for the MCP tool (defaults to workflow name)', + }, + workflowId: { type: 'string', description: 'Workflow ID (defaults to active workflow)' }, + }, + required: ['serverId'], + }, + resultSchema: { + type: 'object', + properties: { + action: { + type: 'string', + description: 'Action performed by the tool, such as "deploy" or "undeploy".', + }, + apiEndpoint: { + type: 'string', + description: 'Underlying workflow API endpoint associated with the MCP tool.', + }, + baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details.', + }, + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_mcp this is always "mcp".', + }, + examples: { + type: 'object', + description: + 'Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients.', + }, + mcpServerUrl: { type: 'string', description: 'HTTP MCP server URL to configure in clients.' }, + removed: { + type: 'boolean', + description: 'Whether the MCP deployment was removed during an undeploy action.', + }, + serverId: { type: 'string', description: 'Workspace MCP server ID.' }, + serverName: { type: 'string', description: 'Workspace MCP server name.' 
}, + toolDescription: { + type: 'string', + description: 'MCP tool description exposed on the server.', + }, + toolId: { type: 'string', description: 'MCP tool ID when deployed.' }, + toolName: { type: 'string', description: 'MCP tool name exposed on the server.' }, + updated: { + type: 'boolean', + description: 'Whether an existing MCP tool deployment was updated instead of created.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID associated with the MCP deployment.', + }, + }, + required: ['deploymentType', 'deploymentStatus'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const DownloadToWorkspaceFile: ToolCatalogEntry = { - id: "download_to_workspace_file", - name: "download_to_workspace_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"fileName":{"type":"string","description":"Optional workspace file name to save as. If omitted, the name is inferred from the response or URL."},"url":{"type":"string","description":"Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg"}},"required":["url"]}, - requiredPermission: "write", -}; + id: 'download_to_workspace_file', + name: 'download_to_workspace_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + fileName: { + type: 'string', + description: + 'Optional workspace file name to save as. If omitted, the name is inferred from the response or URL.', + }, + url: { + type: 'string', + description: + 'Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg', + }, + }, + required: ['url'], + }, + requiredPermission: 'write', +} export const EditContent: ToolCatalogEntry = { - id: "edit_content", - name: "edit_content", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"content":{"type":"string","description":"The text content to write. For append: text to append. 
For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text."}},"required":["content"]}, - resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Optional operation metadata such as file id, file name, size, and content type."},"message":{"type":"string","description":"Human-readable summary of the outcome."},"success":{"type":"boolean","description":"Whether the content was applied successfully."}},"required":["success","message"]}, - requiredPermission: "write", -}; + id: 'edit_content', + name: 'edit_content', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + content: { + type: 'string', + description: + 'The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text.', + }, + }, + required: ['content'], + }, + resultSchema: { + type: 'object', + properties: { + data: { + type: 'object', + description: + 'Optional operation metadata such as file id, file name, size, and content type.', + }, + message: { type: 'string', description: 'Human-readable summary of the outcome.' }, + success: { type: 'boolean', description: 'Whether the content was applied successfully.' }, + }, + required: ['success', 'message'], + }, + requiredPermission: 'write', +} export const EditWorkflow: ToolCatalogEntry = { - id: "edit_workflow", - name: "edit_workflow", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"operations":{"type":"array","description":"Array of edit operations","items":{"type":"object","properties":{"block_id":{"type":"string","description":"Block ID for the operation. 
For add operations, this will be the desired ID for the new block."},"operation_type":{"type":"string","description":"Type of operation to perform","enum":["add","edit","delete","insert_into_subflow","extract_from_subflow"]},"params":{"type":"object","description":"Parameters for the operation. \nFor edit: {\"inputs\": {\"temperature\": 0.5}} NOT {\"subBlocks\": {\"temperature\": {\"value\": 0.5}}}\nFor add: {\"type\": \"agent\", \"name\": \"My Agent\", \"inputs\": {\"model\": \"gpt-4o\"}}\nFor delete: {} (empty object)"}},"required":["operation_type","block_id","params"]}},"workflowId":{"type":"string","description":"Optional workflow ID to edit. If not provided, uses the current workflow in context."}},"required":["operations"]}, - requiredPermission: "write", -}; + id: 'edit_workflow', + name: 'edit_workflow', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + operations: { + type: 'array', + description: 'Array of edit operations', + items: { + type: 'object', + properties: { + block_id: { + type: 'string', + description: + 'Block ID for the operation. For add operations, this will be the desired ID for the new block.', + }, + operation_type: { + type: 'string', + description: 'Type of operation to perform', + enum: ['add', 'edit', 'delete', 'insert_into_subflow', 'extract_from_subflow'], + }, + params: { + type: 'object', + description: + 'Parameters for the operation. \nFor edit: {"inputs": {"temperature": 0.5}} NOT {"subBlocks": {"temperature": {"value": 0.5}}}\nFor add: {"type": "agent", "name": "My Agent", "inputs": {"model": "gpt-4o"}}\nFor delete: {} (empty object)', + }, + }, + required: ['operation_type', 'block_id', 'params'], + }, + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to edit. 
If not provided, uses the current workflow in context.', + }, + }, + required: ['operations'], + }, + requiredPermission: 'write', +} export const File: ToolCatalogEntry = { - id: "file", - name: "file", - route: "subagent", - mode: "async", - parameters: {"type":"object"}, - subagentId: "file", + id: 'file', + name: 'file', + route: 'subagent', + mode: 'async', + parameters: { type: 'object' }, + subagentId: 'file', internal: true, -}; +} export const FunctionExecute: ToolCatalogEntry = { - id: "function_execute", - name: "function_execute", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"code":{"type":"string","description":"Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME."},"inputFiles":{"type":"array","description":"Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: [\"wf_123\"]","items":{"type":"string"}},"inputTables":{"type":"array","description":"Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: [\"tbl_abc123\"]","items":{"type":"string"}},"language":{"type":"string","description":"Execution language.","enum":["javascript","python","shell"]},"outputFormat":{"type":"string","description":"Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.","enum":["json","csv","txt","md","html"]},"outputMimeType":{"type":"string","description":"MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files."},"outputPath":{"type":"string","description":"Pipe output directly to a NEW workspace file instead of returning in context. 
ALWAYS use this instead of a separate workspace_file write call. Use a flat path like \"files/result.json\" — nested paths are not supported."},"outputSandboxPath":{"type":"string","description":"Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath."},"outputTable":{"type":"string","description":"Table ID to overwrite with the code's return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. Example: \"tbl_abc123\""}},"required":["code"]}, - requiredPermission: "write", -}; + id: 'function_execute', + name: 'function_execute', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + code: { + type: 'string', + description: + 'Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME.', + }, + inputFiles: { + type: 'array', + description: + 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: ["wf_123"]', + items: { type: 'string' }, + }, + inputTables: { + type: 'array', + description: + 'Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: ["tbl_abc123"]', + items: { type: 'string' }, + }, + language: { + type: 'string', + description: 'Execution language.', + enum: ['javascript', 'python', 'shell'], + }, + outputFormat: { + type: 'string', + description: + 'Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.', + enum: ['json', 'csv', 'txt', 'md', 'html'], + }, + outputMimeType: { + type: 'string', + description: + 'MIME type for outputSandboxPath export. 
Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files.', + }, + outputPath: { + type: 'string', + description: + 'Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like "files/result.json" — nested paths are not supported.', + }, + outputSandboxPath: { + type: 'string', + description: + 'Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath.', + }, + outputTable: { + type: 'string', + description: + 'Table ID to overwrite with the code\'s return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. Example: "tbl_abc123"', + }, + }, + required: ['code'], + }, + requiredPermission: 'write', +} export const GenerateApiKey: ToolCatalogEntry = { - id: "generate_api_key", - name: "generate_api_key", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"name":{"type":"string","description":"A descriptive name for the API key (e.g., 'production-key', 'dev-testing')."},"workspaceId":{"type":"string","description":"Optional workspace ID. Defaults to user's default workspace."}},"required":["name"]}, + id: 'generate_api_key', + name: 'generate_api_key', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + name: { + type: 'string', + description: "A descriptive name for the API key (e.g., 'production-key', 'dev-testing').", + }, + workspaceId: { + type: 'string', + description: "Optional workspace ID. 
Defaults to user's default workspace.", + }, + }, + required: ['name'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const GenerateImage: ToolCatalogEntry = { - id: "generate_image", - name: "generate_image", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"aspectRatio":{"type":"string","description":"Aspect ratio for the generated image.","enum":["1:1","16:9","9:16","4:3","3:4"]},"fileName":{"type":"string","description":"Output file name. Defaults to \"generated-image.png\". Workspace files are flat, so pass a plain file name, not a nested path."},"overwriteFileId":{"type":"string","description":"If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like \"image (1).png\". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")."},"prompt":{"type":"string","description":"Detailed text description of the image to generate, or editing instructions when used with editFileId."},"referenceFileIds":{"type":"array","description":"File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. 
When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.","items":{"type":"string"}}},"required":["prompt"]}, - requiredPermission: "write", -}; + id: 'generate_image', + name: 'generate_image', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + aspectRatio: { + type: 'string', + description: 'Aspect ratio for the generated image.', + enum: ['1:1', '16:9', '9:16', '4:3', '3:4'], + }, + fileName: { + type: 'string', + description: + 'Output file name. Defaults to "generated-image.png". Workspace files are flat, so pass a plain file name, not a nested path.', + }, + overwriteFileId: { + type: 'string', + description: + 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like "image (1).png". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', + }, + prompt: { + type: 'string', + description: + 'Detailed text description of the image to generate, or editing instructions when used with editFileId.', + }, + referenceFileIds: { + type: 'array', + description: + 'File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. 
When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.', + items: { type: 'string' }, + }, + }, + required: ['prompt'], + }, + requiredPermission: 'write', +} export const GenerateVisualization: ToolCatalogEntry = { - id: "generate_visualization", - name: "generate_visualization", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"code":{"type":"string","description":"Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output."},"fileName":{"type":"string","description":"Output file name. Defaults to \"chart.png\". Workspace files are flat, so pass a plain file name, not a nested path."},"inputFiles":{"type":"array","description":"Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}.","items":{"type":"string"}},"inputTables":{"type":"array","description":"Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')","items":{"type":"string"}},"overwriteFileId":{"type":"string","description":"If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like \"chart (1).png\". 
The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")."}},"required":["code"]}, - requiredPermission: "write", -}; + id: 'generate_visualization', + name: 'generate_visualization', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + code: { + type: 'string', + description: + "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output.", + }, + fileName: { + type: 'string', + description: + 'Output file name. Defaults to "chart.png". Workspace files are flat, so pass a plain file name, not a nested path.', + }, + inputFiles: { + type: 'array', + description: + 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}.', + items: { type: 'string' }, + }, + inputTables: { + type: 'array', + description: + "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", + items: { type: 'string' }, + }, + overwriteFileId: { + type: 'string', + description: + 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like "chart (1).png". 
The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', + }, + }, + required: ['code'], + }, + requiredPermission: 'write', +} export const GetBlockOutputs: ToolCatalogEntry = { - id: "get_block_outputs", - name: "get_block_outputs", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"blockIds":{"type":"array","description":"Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.","items":{"type":"string"}},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}}}, -}; + id: 'get_block_outputs', + name: 'get_block_outputs', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + blockIds: { + type: 'array', + description: + 'Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.', + items: { type: 'string' }, + }, + workflowId: { + type: 'string', + description: 'Optional workflow ID. If not provided, uses the current workflow in context.', + }, + }, + }, +} export const GetBlockUpstreamReferences: ToolCatalogEntry = { - id: "get_block_upstream_references", - name: "get_block_upstream_references", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"blockIds":{"type":"array","description":"Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.","items":{"type":"string"}},"workflowId":{"type":"string","description":"Optional workflow ID. 
If not provided, uses the current workflow in context."}},"required":["blockIds"]}, -}; + id: 'get_block_upstream_references', + name: 'get_block_upstream_references', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + blockIds: { + type: 'array', + description: + 'Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.', + items: { type: 'string' }, + }, + workflowId: { + type: 'string', + description: 'Optional workflow ID. If not provided, uses the current workflow in context.', + }, + }, + required: ['blockIds'], + }, +} export const GetDeployedWorkflowState: ToolCatalogEntry = { - id: "get_deployed_workflow_state", - name: "get_deployed_workflow_state", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}}}, -}; + id: 'get_deployed_workflow_state', + name: 'get_deployed_workflow_state', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Optional workflow ID. 
If not provided, uses the current workflow in context.', + }, + }, + }, +} export const GetDeploymentVersion: ToolCatalogEntry = { - id: "get_deployment_version", - name: "get_deployment_version", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"version":{"type":"number","description":"The deployment version number"},"workflowId":{"type":"string","description":"The workflow ID"}},"required":["workflowId","version"]}, -}; + id: 'get_deployment_version', + name: 'get_deployment_version', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + version: { type: 'number', description: 'The deployment version number' }, + workflowId: { type: 'string', description: 'The workflow ID' }, + }, + required: ['workflowId', 'version'], + }, +} export const GetExecutionSummary: ToolCatalogEntry = { - id: "get_execution_summary", - name: "get_execution_summary", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"limit":{"type":"number","description":"Max number of executions to return (default: 10, max: 20)."},"status":{"type":"string","description":"Filter by status: 'success', 'error', or 'all' (default: 'all').","enum":["success","error","all"]},"workflowId":{"type":"string","description":"Optional workflow ID. If omitted, returns executions across all workflows in the workspace."},"workspaceId":{"type":"string","description":"Workspace ID to scope executions to."}},"required":["workspaceId"]}, -}; + id: 'get_execution_summary', + name: 'get_execution_summary', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + limit: { + type: 'number', + description: 'Max number of executions to return (default: 10, max: 20).', + }, + status: { + type: 'string', + description: "Filter by status: 'success', 'error', or 'all' (default: 'all').", + enum: ['success', 'error', 'all'], + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. 
If omitted, returns executions across all workflows in the workspace.', + }, + workspaceId: { type: 'string', description: 'Workspace ID to scope executions to.' }, + }, + required: ['workspaceId'], + }, +} export const GetJobLogs: ToolCatalogEntry = { - id: "get_job_logs", - name: "get_job_logs", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID for a specific run."},"includeDetails":{"type":"boolean","description":"Include tool calls, outputs, and cost details."},"jobId":{"type":"string","description":"The job (schedule) ID to get logs for."},"limit":{"type":"number","description":"Max number of entries (default: 3, max: 5)"}},"required":["jobId"]}, -}; + id: 'get_job_logs', + name: 'get_job_logs', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + executionId: { type: 'string', description: 'Optional execution ID for a specific run.' }, + includeDetails: { + type: 'boolean', + description: 'Include tool calls, outputs, and cost details.', + }, + jobId: { type: 'string', description: 'The job (schedule) ID to get logs for.' 
}, + limit: { type: 'number', description: 'Max number of entries (default: 3, max: 5)' }, + }, + required: ['jobId'], + }, +} export const GetPageContents: ToolCatalogEntry = { - id: "get_page_contents", - name: "get_page_contents", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"include_highlights":{"type":"boolean","description":"Include key highlights (default false)"},"include_summary":{"type":"boolean","description":"Include AI-generated summary (default false)"},"include_text":{"type":"boolean","description":"Include full page text (default true)"},"urls":{"type":"array","description":"URLs to get content from (max 10)","items":{"type":"string"}}},"required":["urls"]}, -}; + id: 'get_page_contents', + name: 'get_page_contents', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + include_highlights: { + type: 'boolean', + description: 'Include key highlights (default false)', + }, + include_summary: { + type: 'boolean', + description: 'Include AI-generated summary (default false)', + }, + include_text: { type: 'boolean', description: 'Include full page text (default true)' }, + urls: { + type: 'array', + description: 'URLs to get content from (max 10)', + items: { type: 'string' }, + }, + }, + required: ['urls'], + }, +} export const GetPlatformActions: ToolCatalogEntry = { - id: "get_platform_actions", - name: "get_platform_actions", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{}}, -}; + id: 'get_platform_actions', + name: 'get_platform_actions', + route: 'sim', + mode: 'async', + parameters: { type: 'object', properties: {} }, +} export const GetWorkflowData: ToolCatalogEntry = { - id: "get_workflow_data", - name: "get_workflow_data", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"data_type":{"type":"string","description":"The type of workflow data to 
retrieve","enum":["global_variables","custom_tools","mcp_tools","files"]},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}},"required":["data_type"]}, -}; + id: 'get_workflow_data', + name: 'get_workflow_data', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + data_type: { + type: 'string', + description: 'The type of workflow data to retrieve', + enum: ['global_variables', 'custom_tools', 'mcp_tools', 'files'], + }, + workflowId: { + type: 'string', + description: 'Optional workflow ID. If not provided, uses the current workflow in context.', + }, + }, + required: ['data_type'], + }, +} export const GetWorkflowLogs: ToolCatalogEntry = { - id: "get_workflow_logs", - name: "get_workflow_logs", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first."},"includeDetails":{"type":"boolean","description":"Include detailed info"},"limit":{"type":"number","description":"Max number of entries (hard limit: 3)"},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}}}, -}; + id: 'get_workflow_logs', + name: 'get_workflow_logs', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + executionId: { + type: 'string', + description: + 'Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first.', + }, + includeDetails: { type: 'boolean', description: 'Include detailed info' }, + limit: { type: 'number', description: 'Max number of entries (hard limit: 3)' }, + workflowId: { + type: 'string', + description: 'Optional workflow ID. 
If not provided, uses the current workflow in context.', + }, + }, + }, +} export const Glob: ToolCatalogEntry = { - id: "glob", - name: "glob", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"pattern":{"type":"string","description":"Glob pattern to match file paths. Supports * (any segment) and ** (any depth)."},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"workflow configs\" or \"knowledge bases\", not a full sentence like \"Finding workflow configs\"."}},"required":["pattern","toolTitle"]}, -}; + id: 'glob', + name: 'glob', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + pattern: { + type: 'string', + description: + 'Glob pattern to match file paths. Supports * (any segment) and ** (any depth).', + }, + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like "workflow configs" or "knowledge bases", not a full sentence like "Finding workflow configs".', + }, + }, + required: ['pattern', 'toolTitle'], + }, +} export const Grep: ToolCatalogEntry = { - id: "grep", - name: "grep", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"context":{"type":"number","description":"Number of lines to show before and after each match. Only applies to output_mode 'content'."},"ignoreCase":{"type":"boolean","description":"Case insensitive search (default false)."},"lineNumbers":{"type":"boolean","description":"Include line numbers in output (default true). 
Only applies to output_mode 'content'."},"maxResults":{"type":"number","description":"Maximum number of matches to return (default 50)."},"output_mode":{"type":"string","description":"Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.","enum":["content","files_with_matches","count"]},"path":{"type":"string","description":"Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/')."},"pattern":{"type":"string","description":"Regex pattern to search for in file contents."},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"Slack integrations\" or \"deployed workflows\", not a full sentence like \"Searching for Slack integrations\"."}},"required":["pattern","toolTitle"]}, -}; + id: 'grep', + name: 'grep', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + context: { + type: 'number', + description: + "Number of lines to show before and after each match. Only applies to output_mode 'content'.", + }, + ignoreCase: { type: 'boolean', description: 'Case insensitive search (default false).' }, + lineNumbers: { + type: 'boolean', + description: + "Include line numbers in output (default true). Only applies to output_mode 'content'.", + }, + maxResults: { + type: 'number', + description: 'Maximum number of matches to return (default 50).', + }, + output_mode: { + type: 'string', + description: + "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", + enum: ['content', 'files_with_matches', 'count'], + }, + path: { + type: 'string', + description: + "Optional path prefix to scope the search (e.g. 
'workflows/', 'environment/', 'internal/', 'components/blocks/').", + }, + pattern: { type: 'string', description: 'Regex pattern to search for in file contents.' }, + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like "Slack integrations" or "deployed workflows", not a full sentence like "Searching for Slack integrations".', + }, + }, + required: ['pattern', 'toolTitle'], + }, +} export const Job: ToolCatalogEntry = { - id: "job", - name: "job", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"What job action is needed.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "job", + id: 'job', + name: 'job', + route: 'subagent', + mode: 'async', + parameters: { + properties: { request: { description: 'What job action is needed.', type: 'string' } }, + required: ['request'], + type: 'object', + }, + subagentId: 'job', internal: true, -}; +} export const Knowledge: ToolCatalogEntry = { - id: "knowledge", - name: "knowledge", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"What knowledge base action is needed.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "knowledge", + id: 'knowledge', + name: 'knowledge', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + request: { description: 'What knowledge base action is needed.', type: 'string' }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'knowledge', internal: true, -}; +} export const KnowledgeBase: ToolCatalogEntry = { - id: "knowledge_base", - name: "knowledge_base", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"apiKey":{"type":"string","description":"API key for API-key-based connectors (required when connector auth mode is 
apiKey)"},"chunkingConfig":{"type":"object","description":"Chunking configuration (optional for 'create')","properties":{"maxSize":{"type":"number","description":"Maximum chunk size (100-4000, default: 1024)","default":1024},"minSize":{"type":"number","description":"Minimum chunk size (1-2000, default: 1)","default":1},"overlap":{"type":"number","description":"Overlap between chunks (0-500, default: 200)","default":200}}},"connectorId":{"type":"string","description":"Connector ID (required for update_connector, delete_connector, sync_connector)"},"connectorStatus":{"type":"string","description":"Connector status (optional for update_connector)","enum":["active","paused"]},"connectorType":{"type":"string","description":"Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema."},"credentialId":{"type":"string","description":"OAuth credential ID from environment/credentials.json (required for OAuth connectors)"},"description":{"type":"string","description":"Description of the knowledge base (optional for 'create')"},"disabledTagIds":{"type":"array","description":"Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema."},"documentId":{"type":"string","description":"Document ID (required for update_document)"},"documentIds":{"type":"array","description":"Document IDs (for batch delete_document)","items":{"type":"string"}},"enabled":{"type":"boolean","description":"Enable/disable a document (optional for update_document)"},"fileIds":{"type":"array","description":"Canonical workspace file IDs to add as documents (for add_file). 
Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\").","items":{"type":"string"}},"filename":{"type":"string","description":"New filename for a document (optional for update_document)"},"knowledgeBaseId":{"type":"string","description":"Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)"},"knowledgeBaseIds":{"type":"array","description":"Knowledge base IDs (for batch delete)","items":{"type":"string"}},"name":{"type":"string","description":"Name of the knowledge base (required for 'create')"},"query":{"type":"string","description":"Search query text (required for 'query')"},"sourceConfig":{"type":"object","description":"Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json"},"syncIntervalMinutes":{"type":"number","description":"Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). Default: 1440","default":1440},"tagDefinitionId":{"type":"string","description":"Tag definition ID (required for update_tag, delete_tag)"},"tagDisplayName":{"type":"string","description":"Display name for the tag (required for create_tag, optional for update_tag)"},"tagFieldType":{"type":"string","description":"Field type: text, number, date, boolean (optional for create_tag, defaults to text)","enum":["text","number","date","boolean"]},"topK":{"type":"number","description":"Number of results to return (1-50, default: 5)","default":5},"workspaceId":{"type":"string","description":"Workspace ID (required for 'create', optional filter for 'list')"}}},"operation":{"type":"string","description":"The operation to perform","enum":["create","get","query","add_file","update","delete","delete_document","update_document","list_tags","create_tag","update_tag","delete_tag","get_tag_usage","add_connector","update_connector","delete_connector","sync_connector"]}},"required":["operation","args"]}, - resultSchema: 
{"type":"object","properties":{"data":{"type":"object","description":"Operation-specific result payload."},"message":{"type":"string","description":"Human-readable outcome summary."},"success":{"type":"boolean","description":"Whether the operation succeeded."}},"required":["success","message"]}, + id: 'knowledge_base', + name: 'knowledge_base', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: 'Arguments for the operation', + properties: { + apiKey: { + type: 'string', + description: + 'API key for API-key-based connectors (required when connector auth mode is apiKey)', + }, + chunkingConfig: { + type: 'object', + description: "Chunking configuration (optional for 'create')", + properties: { + maxSize: { + type: 'number', + description: 'Maximum chunk size (100-4000, default: 1024)', + default: 1024, + }, + minSize: { + type: 'number', + description: 'Minimum chunk size (1-2000, default: 1)', + default: 1, + }, + overlap: { + type: 'number', + description: 'Overlap between chunks (0-500, default: 200)', + default: 200, + }, + }, + }, + connectorId: { + type: 'string', + description: + 'Connector ID (required for update_connector, delete_connector, sync_connector)', + }, + connectorStatus: { + type: 'string', + description: 'Connector status (optional for update_connector)', + enum: ['active', 'paused'], + }, + connectorType: { + type: 'string', + description: + "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). 
Read knowledgebases/connectors/{type}.json for the config schema.", + }, + credentialId: { + type: 'string', + description: + 'OAuth credential ID from environment/credentials.json (required for OAuth connectors)', + }, + description: { + type: 'string', + description: "Description of the knowledge base (optional for 'create')", + }, + disabledTagIds: { + type: 'array', + description: + 'Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema.', + }, + documentId: { type: 'string', description: 'Document ID (required for update_document)' }, + documentIds: { + type: 'array', + description: 'Document IDs (for batch delete_document)', + items: { type: 'string' }, + }, + enabled: { + type: 'boolean', + description: 'Enable/disable a document (optional for update_document)', + }, + fileIds: { + type: 'array', + description: + 'Canonical workspace file IDs to add as documents (for add_file). Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', + items: { type: 'string' }, + }, + filename: { + type: 'string', + description: 'New filename for a document (optional for update_document)', + }, + knowledgeBaseId: { + type: 'string', + description: + 'Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)', + }, + knowledgeBaseIds: { + type: 'array', + description: 'Knowledge base IDs (for batch delete)', + items: { type: 'string' }, + }, + name: { + type: 'string', + description: "Name of the knowledge base (required for 'create')", + }, + query: { type: 'string', description: "Search query text (required for 'query')" }, + sourceConfig: { + type: 'object', + description: + 'Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json', + }, + syncIntervalMinutes: { + type: 'number', + description: + 'Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440', + default: 1440, + }, + tagDefinitionId: { + type: 'string', + description: 'Tag definition ID (required for update_tag, delete_tag)', + }, + tagDisplayName: { + type: 'string', + description: + 'Display name for the tag (required for create_tag, optional for update_tag)', + }, + tagFieldType: { + type: 'string', + description: + 'Field type: text, number, date, boolean (optional for create_tag, defaults to text)', + enum: ['text', 'number', 'date', 'boolean'], + }, + topK: { + type: 'number', + description: 'Number of results to return (1-50, default: 5)', + default: 5, + }, + workspaceId: { + type: 'string', + description: "Workspace ID (required for 'create', optional filter for 'list')", + }, + }, + }, + operation: { + type: 'string', + description: 'The operation to perform', + enum: [ + 'create', + 'get', + 'query', + 'add_file', + 'update', + 'delete', + 'delete_document', + 'update_document', + 'list_tags', + 'create_tag', + 'update_tag', + 'delete_tag', + 'get_tag_usage', + 'add_connector', + 'update_connector', + 'delete_connector', + 'sync_connector', + ], + }, + }, + required: ['operation', 'args'], + }, + resultSchema: { + type: 'object', + properties: { + data: { type: 'object', description: 'Operation-specific result payload.' }, + message: { type: 'string', description: 'Human-readable outcome summary.' }, + success: { type: 'boolean', description: 'Whether the operation succeeded.' 
}, + }, + required: ['success', 'message'], + }, requiresConfirmation: true, -}; +} export const ListFolders: ToolCatalogEntry = { - id: "list_folders", - name: "list_folders", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workspaceId":{"type":"string","description":"Optional workspace ID to list folders for."}}}, -}; + id: 'list_folders', + name: 'list_folders', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workspaceId: { type: 'string', description: 'Optional workspace ID to list folders for.' }, + }, + }, +} export const ListUserWorkspaces: ToolCatalogEntry = { - id: "list_user_workspaces", - name: "list_user_workspaces", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{}}, -}; + id: 'list_user_workspaces', + name: 'list_user_workspaces', + route: 'sim', + mode: 'async', + parameters: { type: 'object', properties: {} }, +} export const ListWorkspaceMcpServers: ToolCatalogEntry = { - id: "list_workspace_mcp_servers", - name: "list_workspace_mcp_servers", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workspaceId":{"type":"string","description":"Workspace ID (defaults to current workspace)"}}}, -}; + id: 'list_workspace_mcp_servers', + name: 'list_workspace_mcp_servers', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workspaceId: { type: 'string', description: 'Workspace ID (defaults to current workspace)' }, + }, + }, +} export const ManageCredential: ToolCatalogEntry = { - id: "manage_credential", - name: "manage_credential", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"credentialId":{"type":"string","description":"The credential ID (required for rename)"},"credentialIds":{"type":"array","description":"Array of credential IDs (for batch delete)","items":{"type":"string"}},"displayName":{"type":"string","description":"New display name (required for 
rename)"},"operation":{"type":"string","description":"The operation to perform","enum":["rename","delete"]}},"required":["operation"]}, + id: 'manage_credential', + name: 'manage_credential', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + credentialId: { type: 'string', description: 'The credential ID (required for rename)' }, + credentialIds: { + type: 'array', + description: 'Array of credential IDs (for batch delete)', + items: { type: 'string' }, + }, + displayName: { type: 'string', description: 'New display name (required for rename)' }, + operation: { + type: 'string', + description: 'The operation to perform', + enum: ['rename', 'delete'], + }, + }, + required: ['operation'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const ManageCustomTool: ToolCatalogEntry = { - id: "manage_custom_tool", - name: "manage_custom_tool", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"code":{"type":"string","description":"The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. 
Function body only - no signature or wrapping braces."},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"schema":{"type":"object","description":"The tool schema in OpenAI function calling format (required for add).","properties":{"function":{"type":"object","description":"The function definition","properties":{"description":{"type":"string","description":"What the function does"},"name":{"type":"string","description":"The function name (camelCase)"},"parameters":{"type":"object","description":"The function parameters schema","properties":{"properties":{"type":"object","description":"Parameter definitions as key-value pairs"},"required":{"type":"array","description":"Array of required parameter names","items":{"type":"string"}},"type":{"type":"string","description":"Must be 'object'"}},"required":["type","properties"]}},"required":["name","parameters"]},"type":{"type":"string","description":"Must be 'function'"}},"required":["type","function"]},"toolId":{"type":"string","description":"The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'."},"toolIds":{"type":"array","description":"Array of custom tool IDs (for batch delete)","items":{"type":"string"}}},"required":["operation"]}, + id: 'manage_custom_tool', + name: 'manage_custom_tool', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + code: { + type: 'string', + description: + 'The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. 
Function body only - no signature or wrapping braces.', + }, + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], + }, + schema: { + type: 'object', + description: 'The tool schema in OpenAI function calling format (required for add).', + properties: { + function: { + type: 'object', + description: 'The function definition', + properties: { + description: { type: 'string', description: 'What the function does' }, + name: { type: 'string', description: 'The function name (camelCase)' }, + parameters: { + type: 'object', + description: 'The function parameters schema', + properties: { + properties: { + type: 'object', + description: 'Parameter definitions as key-value pairs', + }, + required: { + type: 'array', + description: 'Array of required parameter names', + items: { type: 'string' }, + }, + type: { type: 'string', description: "Must be 'object'" }, + }, + required: ['type', 'properties'], + }, + }, + required: ['name', 'parameters'], + }, + type: { type: 'string', description: "Must be 'function'" }, + }, + required: ['type', 'function'], + }, + toolId: { + type: 'string', + description: + "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'.", + }, + toolIds: { + type: 'array', + description: 'Array of custom tool IDs (for batch delete)', + items: { type: 'string' }, + }, + }, + required: ['operation'], + }, requiresConfirmation: true, -}; +} export const ManageJob: ToolCatalogEntry = { - id: "manage_job", - name: "manage_job", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"args":{"type":"object","description":"Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. 
For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. For list: no args needed.","properties":{"cron":{"type":"string","description":"Cron expression for recurring jobs"},"jobId":{"type":"string","description":"Job ID (required for get, update)"},"jobIds":{"type":"array","description":"Array of job IDs (for batch delete)","items":{"type":"string"}},"lifecycle":{"type":"string","description":"'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called."},"maxRuns":{"type":"integer","description":"Max executions before auto-completing. Safety limit."},"prompt":{"type":"string","description":"The prompt to execute when the job fires"},"status":{"type":"string","description":"Job status: active, paused"},"successCondition":{"type":"string","description":"What must happen for the job to be considered complete (until_complete lifecycle)."},"time":{"type":"string","description":"ISO 8601 datetime for one-time jobs or cron start time"},"timezone":{"type":"string","description":"IANA timezone (e.g. America/New_York). Defaults to UTC."},"title":{"type":"string","description":"Short descriptive title for the job (e.g. 'Email Poller')"}}},"operation":{"type":"string","description":"The operation to perform: create, list, get, update, delete","enum":["create","list","get","update","delete"]}},"required":["operation"]}, -}; + id: 'manage_job', + name: 'manage_job', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: + 'Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. 
For list: no args needed.', + properties: { + cron: { type: 'string', description: 'Cron expression for recurring jobs' }, + jobId: { type: 'string', description: 'Job ID (required for get, update)' }, + jobIds: { + type: 'array', + description: 'Array of job IDs (for batch delete)', + items: { type: 'string' }, + }, + lifecycle: { + type: 'string', + description: + "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called.", + }, + maxRuns: { + type: 'integer', + description: 'Max executions before auto-completing. Safety limit.', + }, + prompt: { type: 'string', description: 'The prompt to execute when the job fires' }, + status: { type: 'string', description: 'Job status: active, paused' }, + successCondition: { + type: 'string', + description: + 'What must happen for the job to be considered complete (until_complete lifecycle).', + }, + time: { + type: 'string', + description: 'ISO 8601 datetime for one-time jobs or cron start time', + }, + timezone: { + type: 'string', + description: 'IANA timezone (e.g. America/New_York). Defaults to UTC.', + }, + title: { + type: 'string', + description: "Short descriptive title for the job (e.g. 'Email Poller')", + }, + }, + }, + operation: { + type: 'string', + description: 'The operation to perform: create, list, get, update, delete', + enum: ['create', 'list', 'get', 'update', 'delete'], + }, + }, + required: ['operation'], + }, +} export const ManageMcpTool: ToolCatalogEntry = { - id: "manage_mcp_tool", - name: "manage_mcp_tool", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"config":{"type":"object","description":"Required for add and edit. 
The MCP server configuration.","properties":{"enabled":{"type":"boolean","description":"Whether the server is enabled (default: true)"},"headers":{"type":"object","description":"Optional HTTP headers to send with requests (key-value pairs)"},"name":{"type":"string","description":"Display name for the MCP server"},"timeout":{"type":"number","description":"Request timeout in milliseconds (default: 30000)"},"transport":{"type":"string","description":"Transport protocol: 'streamable-http' or 'sse'","enum":["streamable-http","sse"],"default":"streamable-http"},"url":{"type":"string","description":"The MCP server endpoint URL (required for add)"}}},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"serverId":{"type":"string","description":"Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'."}},"required":["operation"]}, + id: 'manage_mcp_tool', + name: 'manage_mcp_tool', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + config: { + type: 'object', + description: 'Required for add and edit. 
The MCP server configuration.', + properties: { + enabled: { + type: 'boolean', + description: 'Whether the server is enabled (default: true)', + }, + headers: { + type: 'object', + description: 'Optional HTTP headers to send with requests (key-value pairs)', + }, + name: { type: 'string', description: 'Display name for the MCP server' }, + timeout: { + type: 'number', + description: 'Request timeout in milliseconds (default: 30000)', + }, + transport: { + type: 'string', + description: "Transport protocol: 'streamable-http' or 'sse'", + enum: ['streamable-http', 'sse'], + default: 'streamable-http', + }, + url: { type: 'string', description: 'The MCP server endpoint URL (required for add)' }, + }, + }, + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], + }, + serverId: { + type: 'string', + description: + "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'.", + }, + }, + required: ['operation'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const ManageSkill: ToolCatalogEntry = { - id: "manage_skill", - name: "manage_skill", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"content":{"type":"string","description":"Markdown instructions for the skill. Required for add, optional for edit."},"description":{"type":"string","description":"Short description of the skill. Required for add, optional for edit."},"name":{"type":"string","description":"Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit."},"operation":{"type":"string","description":"The operation to perform: 'add', 'edit', 'list', or 'delete'","enum":["add","edit","delete","list"]},"skillId":{"type":"string","description":"The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. 
DO NOT PROVIDE if operation is 'add' or 'list'."}},"required":["operation"]}, + id: 'manage_skill', + name: 'manage_skill', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + content: { + type: 'string', + description: 'Markdown instructions for the skill. Required for add, optional for edit.', + }, + description: { + type: 'string', + description: 'Short description of the skill. Required for add, optional for edit.', + }, + name: { + type: 'string', + description: + "Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit.", + }, + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], + }, + skillId: { + type: 'string', + description: + "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. DO NOT PROVIDE if operation is 'add' or 'list'.", + }, + }, + required: ['operation'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const MaterializeFile: ToolCatalogEntry = { - id: "materialize_file", - name: "materialize_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"fileNames":{"type":"array","description":"The names of the uploaded files to materialize (e.g. [\"report.pdf\", \"data.csv\"])","items":{"type":"string"}},"knowledgeBaseId":{"type":"string","description":"ID of an existing knowledge base to add the file to (only used with operation \"knowledge_base\"). If omitted, a new KB is created."},"operation":{"type":"string","description":"What to do with the file. \"save\" promotes it to files/. \"import\" imports a workflow JSON. \"table\" converts CSV/TSV/JSON to a table. \"knowledge_base\" saves and adds to a KB. 
Defaults to \"save\".","enum":["save","import","table","knowledge_base"],"default":"save"},"tableName":{"type":"string","description":"Custom name for the table (only used with operation \"table\"). Defaults to the file name without extension."}},"required":["fileNames"]}, - requiredPermission: "write", -}; + id: 'materialize_file', + name: 'materialize_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + fileNames: { + type: 'array', + description: + 'The names of the uploaded files to materialize (e.g. ["report.pdf", "data.csv"])', + items: { type: 'string' }, + }, + knowledgeBaseId: { + type: 'string', + description: + 'ID of an existing knowledge base to add the file to (only used with operation "knowledge_base"). If omitted, a new KB is created.', + }, + operation: { + type: 'string', + description: + 'What to do with the file. "save" promotes it to files/. "import" imports a workflow JSON. "table" converts CSV/TSV/JSON to a table. "knowledge_base" saves and adds to a KB. Defaults to "save".', + enum: ['save', 'import', 'table', 'knowledge_base'], + default: 'save', + }, + tableName: { + type: 'string', + description: + 'Custom name for the table (only used with operation "table"). Defaults to the file name without extension.', + }, + }, + required: ['fileNames'], + }, + requiredPermission: 'write', +} export const MoveFolder: ToolCatalogEntry = { - id: "move_folder", - name: "move_folder", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"folderId":{"type":"string","description":"The folder ID to move."},"parentId":{"type":"string","description":"Target parent folder ID. Omit or pass empty string to move to workspace root."}},"required":["folderId"]}, - requiredPermission: "write", -}; + id: 'move_folder', + name: 'move_folder', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + folderId: { type: 'string', description: 'The folder ID to move.' 
}, + parentId: { + type: 'string', + description: + 'Target parent folder ID. Omit or pass empty string to move to workspace root.', + }, + }, + required: ['folderId'], + }, + requiredPermission: 'write', +} export const MoveWorkflow: ToolCatalogEntry = { - id: "move_workflow", - name: "move_workflow", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"folderId":{"type":"string","description":"Target folder ID. Omit or pass empty string to move to workspace root."},"workflowIds":{"type":"array","description":"The workflow IDs to move.","items":{"type":"string"}}},"required":["workflowIds"]}, - requiredPermission: "write", -}; + id: 'move_workflow', + name: 'move_workflow', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + folderId: { + type: 'string', + description: 'Target folder ID. Omit or pass empty string to move to workspace root.', + }, + workflowIds: { + type: 'array', + description: 'The workflow IDs to move.', + items: { type: 'string' }, + }, + }, + required: ['workflowIds'], + }, + requiredPermission: 'write', +} export const OauthGetAuthLink: ToolCatalogEntry = { - id: "oauth_get_auth_link", - name: "oauth_get_auth_link", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"providerName":{"type":"string","description":"The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')"}},"required":["providerName"]}, -}; + id: 'oauth_get_auth_link', + name: 'oauth_get_auth_link', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + providerName: { + type: 'string', + description: + "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')", + }, + }, + required: ['providerName'], + }, +} export const OauthRequestAccess: ToolCatalogEntry = { - id: "oauth_request_access", - name: "oauth_request_access", - route: "sim", - mode: "async", - parameters: 
{"type":"object","properties":{"providerName":{"type":"string","description":"The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')"}},"required":["providerName"]}, + id: 'oauth_request_access', + name: 'oauth_request_access', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + providerName: { + type: 'string', + description: + "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')", + }, + }, + required: ['providerName'], + }, requiresConfirmation: true, -}; +} export const OpenResource: ToolCatalogEntry = { - id: "open_resource", - name: "open_resource", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"resources":{"type":"array","description":"Array of resources to open. Each item must have type and id.","items":{"type":"object","properties":{"id":{"type":"string","description":"The resource ID."},"type":{"type":"string","description":"The resource type.","enum":["workflow","table","knowledgebase","file","log"]}},"required":["type","id"]}}},"required":["resources"]}, -}; + id: 'open_resource', + name: 'open_resource', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + resources: { + type: 'array', + description: 'Array of resources to open. Each item must have type and id.', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'The resource ID.' 
}, + type: { + type: 'string', + description: 'The resource type.', + enum: ['workflow', 'table', 'knowledgebase', 'file', 'log'], + }, + }, + required: ['type', 'id'], + }, + }, + }, + required: ['resources'], + }, +} export const Read: ToolCatalogEntry = { - id: "read", - name: "read", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"limit":{"type":"number","description":"Maximum number of lines to read."},"offset":{"type":"number","description":"Line offset to start reading from (0-indexed)."},"outputTable":{"type":"string","description":"Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: \"tbl_abc123\""},"path":{"type":"string","description":"Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json')."}},"required":["path"]}, -}; + id: 'read', + name: 'read', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + limit: { type: 'number', description: 'Maximum number of lines to read.' }, + offset: { type: 'number', description: 'Line offset to start reading from (0-indexed).' }, + outputTable: { + type: 'string', + description: + 'Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: "tbl_abc123"', + }, + path: { + type: 'string', + description: + "Path to the file to read (e.g. 
'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", + }, + }, + required: ['path'], + }, +} export const Redeploy: ToolCatalogEntry = { - id: "redeploy", - name: "redeploy", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"workflowId":{"type":"string","description":"Workflow ID to redeploy (required in workspace context)"}}}, - resultSchema: {"type":"object","properties":{"apiEndpoint":{"type":"string","description":"Canonical workflow execution endpoint."},"baseUrl":{"type":"string","description":"Base URL used to construct deployment URLs."},"deployedAt":{"type":"string","description":"Deployment timestamp when the workflow is deployed."},"deploymentConfig":{"type":"object","description":"Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details."},"deploymentStatus":{"type":"object","description":"Structured per-surface deployment status keyed by surface name, such as api."},"deploymentType":{"type":"string","description":"Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"."},"examples":{"type":"object","description":"Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling."},"isDeployed":{"type":"boolean","description":"Whether the workflow API is currently deployed after this tool call."},"version":{"type":"number","description":"Deployment version for the current API deployment."},"workflowId":{"type":"string","description":"Workflow ID that was deployed or undeployed."}},"required":["workflowId","isDeployed","deploymentType","deploymentStatus","deploymentConfig","examples"]}, + id: 'redeploy', + name: 'redeploy', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Workflow ID to redeploy (required in workspace context)', + }, + }, + }, + resultSchema: { + type: 'object', + properties: { + apiEndpoint: { type: 'string', description: 'Canonical workflow execution endpoint.' }, + baseUrl: { type: 'string', description: 'Base URL used to construct deployment URLs.' }, + deployedAt: { + type: 'string', + description: 'Deployment timestamp when the workflow is deployed.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', + }, + deploymentStatus: { + type: 'object', + description: 'Structured per-surface deployment status keyed by surface name, such as api.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the workflow API is currently deployed after this tool call.', + }, + version: { + type: 'number', + description: 'Deployment version for the current API deployment.', + }, + workflowId: { type: 'string', description: 'Workflow ID that was deployed or undeployed.' }, + }, + required: [ + 'workflowId', + 'isDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const RenameFile: ToolCatalogEntry = { - id: "rename_file", - name: "rename_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"fileId":{"type":"string","description":"Canonical workspace file ID of the file to rename."},"newName":{"type":"string","description":"New filename including extension, e.g. \"draft_v2.md\". Must not contain slashes."}},"required":["fileId","newName"]}, - resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Contains id and the new name."},"message":{"type":"string","description":"Human-readable outcome."},"success":{"type":"boolean","description":"Whether the rename succeeded."}},"required":["success","message"]}, - requiredPermission: "write", -}; + id: 'rename_file', + name: 'rename_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + fileId: { type: 'string', description: 'Canonical workspace file ID of the file to rename.' }, + newName: { + type: 'string', + description: + 'New filename including extension, e.g. "draft_v2.md". Must not contain slashes.', + }, + }, + required: ['fileId', 'newName'], + }, + resultSchema: { + type: 'object', + properties: { + data: { type: 'object', description: 'Contains id and the new name.' 
}, + message: { type: 'string', description: 'Human-readable outcome.' }, + success: { type: 'boolean', description: 'Whether the rename succeeded.' }, + }, + required: ['success', 'message'], + }, + requiredPermission: 'write', +} export const RenameWorkflow: ToolCatalogEntry = { - id: "rename_workflow", - name: "rename_workflow", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"name":{"type":"string","description":"The new name for the workflow."},"workflowId":{"type":"string","description":"The workflow ID to rename."}},"required":["workflowId","name"]}, - requiredPermission: "write", -}; + id: 'rename_workflow', + name: 'rename_workflow', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + name: { type: 'string', description: 'The new name for the workflow.' }, + workflowId: { type: 'string', description: 'The workflow ID to rename.' }, + }, + required: ['workflowId', 'name'], + }, + requiredPermission: 'write', +} export const Research: ToolCatalogEntry = { - id: "research", - name: "research", - route: "subagent", - mode: "async", - parameters: {"properties":{"topic":{"description":"The topic to research.","type":"string"}},"required":["topic"],"type":"object"}, - subagentId: "research", + id: 'research', + name: 'research', + route: 'subagent', + mode: 'async', + parameters: { + properties: { topic: { description: 'The topic to research.', type: 'string' } }, + required: ['topic'], + type: 'object', + }, + subagentId: 'research', internal: true, -}; +} export const Respond: ToolCatalogEntry = { - id: "respond", - name: "respond", - route: "sim", - mode: "async", - parameters: {"additionalProperties":true,"properties":{"output":{"description":"The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.","type":"string"},"success":{"description":"Whether the task completed successfully","type":"boolean"},"type":{"description":"Optional logical result type 
override","type":"string"}},"required":["output","success"],"type":"object"}, + id: 'respond', + name: 'respond', + route: 'sim', + mode: 'async', + parameters: { + additionalProperties: true, + properties: { + output: { + description: + 'The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.', + type: 'string', + }, + success: { description: 'Whether the task completed successfully', type: 'boolean' }, + type: { description: 'Optional logical result type override', type: 'string' }, + }, + required: ['output', 'success'], + type: 'object', + }, internal: true, hidden: true, -}; +} export const RestoreResource: ToolCatalogEntry = { - id: "restore_resource", - name: "restore_resource", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"id":{"type":"string","description":"The canonical resource ID to restore."},"type":{"type":"string","description":"The resource type to restore.","enum":["workflow","table","file","knowledgebase","folder"]}},"required":["type","id"]}, + id: 'restore_resource', + name: 'restore_resource', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + id: { type: 'string', description: 'The canonical resource ID to restore.' 
}, + type: { + type: 'string', + description: 'The resource type to restore.', + enum: ['workflow', 'table', 'file', 'knowledgebase', 'folder'], + }, + }, + required: ['type', 'id'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const RevertToVersion: ToolCatalogEntry = { - id: "revert_to_version", - name: "revert_to_version", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"version":{"type":"number","description":"The deployment version number to revert to"},"workflowId":{"type":"string","description":"The workflow ID"}},"required":["workflowId","version"]}, + id: 'revert_to_version', + name: 'revert_to_version', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + version: { type: 'number', description: 'The deployment version number to revert to' }, + workflowId: { type: 'string', description: 'The workflow ID' }, + }, + required: ['workflowId', 'version'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const Run: ToolCatalogEntry = { - id: "run", - name: "run", - route: "subagent", - mode: "async", - parameters: {"properties":{"context":{"description":"Pre-gathered context: workflow state, block IDs, input requirements.","type":"string"},"request":{"description":"What to run or what logs to check.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "run", + id: 'run', + name: 'run', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + context: { + description: 'Pre-gathered context: workflow state, block IDs, input requirements.', + type: 'string', + }, + request: { description: 'What to run or what logs to check.', type: 'string' }, + }, + required: ['request'], + type: 'object', + }, + subagentId: 'run', internal: true, -}; +} export const RunBlock: ToolCatalogEntry = { - id: "run_block", - name: "run_block", - route: "client", - mode: 
"async", - parameters: {"type":"object","properties":{"blockId":{"type":"string","description":"The block ID to run in isolation."},"executionId":{"type":"string","description":"Optional execution ID to load the snapshot from. Uses latest execution if omitted."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["blockId"]}, + id: 'run_block', + name: 'run_block', + route: 'client', + mode: 'async', + parameters: { + type: 'object', + properties: { + blockId: { type: 'string', description: 'The block ID to run in isolation.' }, + executionId: { + type: 'string', + description: + 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', + }, + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', + }, + }, + required: ['blockId'], + }, clientExecutable: true, requiresConfirmation: true, -}; +} export const RunFromBlock: ToolCatalogEntry = { - id: "run_from_block", - name: "run_from_block", - route: "client", - mode: "async", - parameters: {"type":"object","properties":{"executionId":{"type":"string","description":"Optional execution ID to load the snapshot from. 
Uses latest execution if omitted."},"startBlockId":{"type":"string","description":"The block ID to start execution from."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["startBlockId"]}, + id: 'run_from_block', + name: 'run_from_block', + route: 'client', + mode: 'async', + parameters: { + type: 'object', + properties: { + executionId: { + type: 'string', + description: + 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', + }, + startBlockId: { type: 'string', description: 'The block ID to start execution from.' }, + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', + }, + }, + required: ['startBlockId'], + }, clientExecutable: true, requiresConfirmation: true, -}; +} export const RunWorkflow: ToolCatalogEntry = { - id: "run_workflow", - name: "run_workflow", - route: "client", - mode: "async", - parameters: {"type":"object","properties":{"triggerBlockId":{"type":"string","description":"Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. 
Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["workflow_input"]}, + id: 'run_workflow', + name: 'run_workflow', + route: 'client', + mode: 'async', + parameters: { + type: 'object', + properties: { + triggerBlockId: { + type: 'string', + description: + 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', + }, + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', + }, + }, + required: ['workflow_input'], + }, clientExecutable: true, requiresConfirmation: true, -}; +} export const RunWorkflowUntilBlock: ToolCatalogEntry = { - id: "run_workflow_until_block", - name: "run_workflow_until_block", - route: "client", - mode: "async", - parameters: {"type":"object","properties":{"stopAfterBlockId":{"type":"string","description":"The block ID to stop after. Execution halts once this block completes."},"triggerBlockId":{"type":"string","description":"Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one."},"useDeployedState":{"type":"boolean","description":"When true, runs the deployed version instead of the live draft. Default: false (draft)."},"workflowId":{"type":"string","description":"Optional workflow ID to run. 
If not provided, uses the current workflow in context."},"workflow_input":{"type":"object","description":"JSON object with key-value mappings where each key is an input field name"}},"required":["stopAfterBlockId"]}, + id: 'run_workflow_until_block', + name: 'run_workflow_until_block', + route: 'client', + mode: 'async', + parameters: { + type: 'object', + properties: { + stopAfterBlockId: { + type: 'string', + description: 'The block ID to stop after. Execution halts once this block completes.', + }, + triggerBlockId: { + type: 'string', + description: + 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', + }, + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', + }, + }, + required: ['stopAfterBlockId'], + }, clientExecutable: true, requiresConfirmation: true, -}; +} export const ScrapePage: ToolCatalogEntry = { - id: "scrape_page", - name: "scrape_page", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"include_links":{"type":"boolean","description":"Extract all links from the page (default false)"},"url":{"type":"string","description":"The URL to scrape (must include https://)"},"wait_for":{"type":"string","description":"CSS selector to wait for before scraping (for JS-heavy pages)"}},"required":["url"]}, -}; + id: 'scrape_page', + name: 'scrape_page', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + include_links: { + type: 'boolean', + description: 'Extract all links from the page (default false)', + }, + url: { type: 'string', description: 'The URL to scrape (must include 
https://)' }, + wait_for: { + type: 'string', + description: 'CSS selector to wait for before scraping (for JS-heavy pages)', + }, + }, + required: ['url'], + }, +} export const SearchDocumentation: ToolCatalogEntry = { - id: "search_documentation", - name: "search_documentation", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"query":{"type":"string","description":"The search query"},"topK":{"type":"number","description":"Number of results (max 10)"}},"required":["query"]}, -}; + id: 'search_documentation', + name: 'search_documentation', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + query: { type: 'string', description: 'The search query' }, + topK: { type: 'number', description: 'Number of results (max 10)' }, + }, + required: ['query'], + }, +} export const SearchLibraryDocs: ToolCatalogEntry = { - id: "search_library_docs", - name: "search_library_docs", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"library_name":{"type":"string","description":"Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')"},"query":{"type":"string","description":"The question or topic to find documentation for - be specific"},"version":{"type":"string","description":"Specific version (optional, e.g., '14', 'v2')"}},"required":["library_name","query"]}, -}; + id: 'search_library_docs', + name: 'search_library_docs', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + library_name: { + type: 'string', + description: "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')", + }, + query: { + type: 'string', + description: 'The question or topic to find documentation for - be specific', + }, + version: { type: 'string', description: "Specific version (optional, e.g., '14', 'v2')" }, + }, + required: ['library_name', 'query'], + }, +} export const SearchOnline: ToolCatalogEntry = { - id: "search_online", - name: 
"search_online", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"category":{"type":"string","description":"Filter by category","enum":["news","tweet","github","paper","company","research paper","linkedin profile","pdf","personal site"]},"include_text":{"type":"boolean","description":"Include page text content (default true)"},"num_results":{"type":"number","description":"Number of results (default 10, max 25)"},"query":{"type":"string","description":"Natural language search query"},"toolTitle":{"type":"string","description":"Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"pricing changes\" or \"Slack webhook docs\", not a full sentence like \"Searching online for pricing changes\"."}},"required":["query","toolTitle"]}, -}; + id: 'search_online', + name: 'search_online', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + category: { + type: 'string', + description: 'Filter by category', + enum: [ + 'news', + 'tweet', + 'github', + 'paper', + 'company', + 'research paper', + 'linkedin profile', + 'pdf', + 'personal site', + ], + }, + include_text: { type: 'boolean', description: 'Include page text content (default true)' }, + num_results: { type: 'number', description: 'Number of results (default 10, max 25)' }, + query: { type: 'string', description: 'Natural language search query' }, + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "pricing changes" or "Slack webhook docs", not a full sentence like "Searching online for pricing changes".', + }, + }, + required: ['query', 'toolTitle'], + }, +} export const SearchPatterns: ToolCatalogEntry = { - id: "search_patterns", - name: "search_patterns", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"limit":{"type":"integer","description":"Maximum number of unique pattern examples to return (defaults to 3)."},"queries":{"type":"array","description":"Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.","items":{"type":"string","description":"Example: \"how to automate wealthbox meeting notes into follow-up tasks\""}}},"required":["queries"]}, -}; + id: 'search_patterns', + name: 'search_patterns', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + limit: { + type: 'integer', + description: 'Maximum number of unique pattern examples to return (defaults to 3).', + }, + queries: { + type: 'array', + description: + 'Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.', + items: { + type: 'string', + description: 'Example: "how to automate wealthbox meeting notes into follow-up tasks"', + }, + }, + }, + required: ['queries'], + }, +} export const SetBlockEnabled: ToolCatalogEntry = { - id: "set_block_enabled", - name: "set_block_enabled", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"blockId":{"type":"string","description":"The block ID whose enabled state should be changed."},"enabled":{"type":"boolean","description":"Set to true to enable the block, or false to disable it."},"workflowId":{"type":"string","description":"Optional workflow ID to edit. 
If not provided, uses the current workflow in context."}},"required":["blockId","enabled"]}, - requiredPermission: "write", -}; + id: 'set_block_enabled', + name: 'set_block_enabled', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + blockId: { + type: 'string', + description: 'The block ID whose enabled state should be changed.', + }, + enabled: { + type: 'boolean', + description: 'Set to true to enable the block, or false to disable it.', + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', + }, + }, + required: ['blockId', 'enabled'], + }, + requiredPermission: 'write', +} export const SetEnvironmentVariables: ToolCatalogEntry = { - id: "set_environment_variables", - name: "set_environment_variables", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"scope":{"type":"string","description":"Whether to set workspace or personal environment variables. Defaults to workspace.","enum":["personal","workspace"],"default":"workspace"},"variables":{"type":"array","description":"List of env vars to set","items":{"type":"object","properties":{"name":{"type":"string","description":"Variable name"},"value":{"type":"string","description":"Variable value"}},"required":["name","value"]}}},"required":["variables"]}, + id: 'set_environment_variables', + name: 'set_environment_variables', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + scope: { + type: 'string', + description: + 'Whether to set workspace or personal environment variables. 
Defaults to workspace.', + enum: ['personal', 'workspace'], + default: 'workspace', + }, + variables: { + type: 'array', + description: 'List of env vars to set', + items: { + type: 'object', + properties: { + name: { type: 'string', description: 'Variable name' }, + value: { type: 'string', description: 'Variable value' }, + }, + required: ['name', 'value'], + }, + }, + }, + required: ['variables'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const SetGlobalWorkflowVariables: ToolCatalogEntry = { - id: "set_global_workflow_variables", - name: "set_global_workflow_variables", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"operations":{"type":"array","description":"List of operations to apply","items":{"type":"object","properties":{"name":{"type":"string"},"operation":{"type":"string","enum":["add","delete","edit"]},"type":{"type":"string","enum":["plain","number","boolean","array","object"]},"value":{"type":"string"}},"required":["operation","name","type","value"]}},"workflowId":{"type":"string","description":"Optional workflow ID. If not provided, uses the current workflow in context."}},"required":["operations"]}, + id: 'set_global_workflow_variables', + name: 'set_global_workflow_variables', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + operations: { + type: 'array', + description: 'List of operations to apply', + items: { + type: 'object', + properties: { + name: { type: 'string' }, + operation: { type: 'string', enum: ['add', 'delete', 'edit'] }, + type: { type: 'string', enum: ['plain', 'number', 'boolean', 'array', 'object'] }, + value: { type: 'string' }, + }, + required: ['operation', 'name', 'type', 'value'], + }, + }, + workflowId: { + type: 'string', + description: 'Optional workflow ID. 
If not provided, uses the current workflow in context.', + }, + }, + required: ['operations'], + }, requiresConfirmation: true, - requiredPermission: "write", -}; + requiredPermission: 'write', +} export const Superagent: ToolCatalogEntry = { - id: "superagent", - name: "superagent", - route: "subagent", - mode: "async", - parameters: {"properties":{"task":{"description":"A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.","type":"string"}},"required":["task"],"type":"object"}, - subagentId: "superagent", + id: 'superagent', + name: 'superagent', + route: 'subagent', + mode: 'async', + parameters: { + properties: { + task: { + description: + "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", + type: 'string', + }, + }, + required: ['task'], + type: 'object', + }, + subagentId: 'superagent', internal: true, -}; +} export const Table: ToolCatalogEntry = { - id: "table", - name: "table", - route: "subagent", - mode: "async", - parameters: {"properties":{"request":{"description":"What table action is needed.","type":"string"}},"required":["request"],"type":"object"}, - subagentId: "table", + id: 'table', + name: 'table', + route: 'subagent', + mode: 'async', + parameters: { + properties: { request: { description: 'What table action is needed.', type: 'string' } }, + required: ['request'], + type: 'object', + }, + subagentId: 'table', internal: true, -}; +} export const ToolSearchToolRegex: ToolCatalogEntry = { - id: "tool_search_tool_regex", - name: "tool_search_tool_regex", - route: "sim", - mode: "async", - parameters: {"properties":{"case_insensitive":{"description":"Whether the regex should be case-insensitive (default true).","type":"boolean"},"max_results":{"description":"Maximum number of tools 
to return (optional).","type":"integer"},"pattern":{"description":"Regular expression to match tool names or descriptions.","type":"string"}},"required":["pattern"],"type":"object"}, -}; + id: 'tool_search_tool_regex', + name: 'tool_search_tool_regex', + route: 'sim', + mode: 'async', + parameters: { + properties: { + case_insensitive: { + description: 'Whether the regex should be case-insensitive (default true).', + type: 'boolean', + }, + max_results: { + description: 'Maximum number of tools to return (optional).', + type: 'integer', + }, + pattern: { + description: 'Regular expression to match tool names or descriptions.', + type: 'string', + }, + }, + required: ['pattern'], + type: 'object', + }, +} export const UpdateJobHistory: ToolCatalogEntry = { - id: "update_job_history", - name: "update_job_history", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"jobId":{"type":"string","description":"The job ID."},"summary":{"type":"string","description":"A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol')."}},"required":["jobId","summary"]}, -}; + id: 'update_job_history', + name: 'update_job_history', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + jobId: { type: 'string', description: 'The job ID.' 
}, + summary: { + type: 'string', + description: + "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol').", + }, + }, + required: ['jobId', 'summary'], + }, +} export const UpdateWorkspaceMcpServer: ToolCatalogEntry = { - id: "update_workspace_mcp_server", - name: "update_workspace_mcp_server", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"description":{"type":"string","description":"New description for the server"},"isPublic":{"type":"boolean","description":"Whether the server is publicly accessible"},"name":{"type":"string","description":"New name for the server"},"serverId":{"type":"string","description":"Required: the MCP server ID to update"}},"required":["serverId"]}, + id: 'update_workspace_mcp_server', + name: 'update_workspace_mcp_server', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + description: { type: 'string', description: 'New description for the server' }, + isPublic: { type: 'boolean', description: 'Whether the server is publicly accessible' }, + name: { type: 'string', description: 'New name for the server' }, + serverId: { type: 'string', description: 'Required: the MCP server ID to update' }, + }, + required: ['serverId'], + }, requiresConfirmation: true, - requiredPermission: "admin", -}; + requiredPermission: 'admin', +} export const UserMemory: ToolCatalogEntry = { - id: "user_memory", - name: "user_memory", - route: "go", - mode: "sync", - parameters: {"type":"object","properties":{"confidence":{"type":"number","description":"Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)"},"correct_value":{"type":"string","description":"The correct value to replace the wrong one (for 'correct' operation)"},"key":{"type":"string","description":"Unique key for the memory (e.g., 'preferred_model', 'slack_credential')"},"limit":{"type":"number","description":"Number of results for search (default 
10)"},"memory_type":{"type":"string","description":"Type of memory: 'preference', 'entity', 'history', or 'correction'","enum":["preference","entity","history","correction"]},"operation":{"type":"string","description":"Operation: 'add', 'search', 'delete', 'correct', or 'list'","enum":["add","search","delete","correct","list"]},"query":{"type":"string","description":"Search query to find relevant memories"},"source":{"type":"string","description":"Source: 'explicit' (user told you) or 'inferred' (you observed)","enum":["explicit","inferred"]},"value":{"type":"string","description":"Value to remember"}},"required":["operation"]}, -}; + id: 'user_memory', + name: 'user_memory', + route: 'go', + mode: 'sync', + parameters: { + type: 'object', + properties: { + confidence: { + type: 'number', + description: 'Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)', + }, + correct_value: { + type: 'string', + description: "The correct value to replace the wrong one (for 'correct' operation)", + }, + key: { + type: 'string', + description: "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')", + }, + limit: { type: 'number', description: 'Number of results for search (default 10)' }, + memory_type: { + type: 'string', + description: "Type of memory: 'preference', 'entity', 'history', or 'correction'", + enum: ['preference', 'entity', 'history', 'correction'], + }, + operation: { + type: 'string', + description: "Operation: 'add', 'search', 'delete', 'correct', or 'list'", + enum: ['add', 'search', 'delete', 'correct', 'list'], + }, + query: { type: 'string', description: 'Search query to find relevant memories' }, + source: { + type: 'string', + description: "Source: 'explicit' (user told you) or 'inferred' (you observed)", + enum: ['explicit', 'inferred'], + }, + value: { type: 'string', description: 'Value to remember' }, + }, + required: ['operation'], + }, +} export const UserTable: ToolCatalogEntry = { - id: "user_table", - name: 
"user_table", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"column":{"type":"object","description":"Column definition for add_column: { name, type, unique?, position? }"},"columnName":{"type":"string","description":"Column name (required for rename_column, update_column; use columnNames array for batch delete_column)"},"columnNames":{"type":"array","description":"Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns."},"data":{"type":"object","description":"Row data as key-value pairs (required for insert_row, update_row)"},"description":{"type":"string","description":"Table description (optional for 'create')"},"fileId":{"type":"string","description":"Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")."},"filePath":{"type":"string","description":"Legacy workspace file reference for create_from_file/import_file. Prefer fileId."},"filter":{"type":"object","description":"MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter"},"limit":{"type":"number","description":"Maximum rows to return or affect (optional, default 100)"},"name":{"type":"string","description":"Table name (required for 'create')"},"newName":{"type":"string","description":"New column name (required for rename_column)"},"newType":{"type":"string","description":"New column type (optional for update_column). Types: string, number, boolean, date, json"},"offset":{"type":"number","description":"Number of rows to skip (optional for query_rows, default 0)"},"outputFormat":{"type":"string","description":"Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. 
Only use this to force a different format than what the extension implies.","enum":["json","csv","txt","md","html"]},"outputPath":{"type":"string","description":"Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported."},"rowId":{"type":"string","description":"Row ID (required for get_row, update_row, delete_row)"},"rowIds":{"type":"array","description":"Array of row IDs to delete (for batch_delete_rows)"},"rows":{"type":"array","description":"Array of row data objects (required for batch_insert_rows)"},"schema":{"type":"object","description":"Table schema with columns array (required for 'create'). Each column: { name, type, unique? }"},"sort":{"type":"object","description":"Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)"},"tableId":{"type":"string","description":"Table ID (required for most operations except 'create' and batch 'delete')"},"tableIds":{"type":"array","description":"Array of table IDs (for batch delete)","items":{"type":"string"}},"unique":{"type":"boolean","description":"Set column unique constraint (optional for update_column)"},"updates":{"type":"array","description":"Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)"},"values":{"type":"object","description":"Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)"}}},"operation":{"type":"string","description":"The operation to 
perform","enum":["create","create_from_file","import_file","get","get_schema","delete","insert_row","batch_insert_rows","get_row","query_rows","update_row","delete_row","update_rows_by_filter","delete_rows_by_filter","batch_update_rows","batch_delete_rows","add_column","rename_column","delete_column","update_column"]}},"required":["operation","args"]}, - resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Operation-specific result payload."},"message":{"type":"string","description":"Human-readable outcome summary."},"success":{"type":"boolean","description":"Whether the operation succeeded."}},"required":["success","message"]}, + id: 'user_table', + name: 'user_table', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: 'Arguments for the operation', + properties: { + column: { + type: 'object', + description: 'Column definition for add_column: { name, type, unique?, position? }', + }, + columnName: { + type: 'string', + description: + 'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)', + }, + columnNames: { + type: 'array', + description: + 'Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns.', + }, + data: { + type: 'object', + description: 'Row data as key-value pairs (required for insert_row, update_row)', + }, + description: { type: 'string', description: "Table description (optional for 'create')" }, + fileId: { + type: 'string', + description: + 'Canonical workspace file ID for create_from_file/import_file. Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', + }, + filePath: { + type: 'string', + description: + 'Legacy workspace file reference for create_from_file/import_file. 
Prefer fileId.', + }, + filter: { + type: 'object', + description: + 'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter', + }, + limit: { + type: 'number', + description: 'Maximum rows to return or affect (optional, default 100)', + }, + name: { type: 'string', description: "Table name (required for 'create')" }, + newName: { type: 'string', description: 'New column name (required for rename_column)' }, + newType: { + type: 'string', + description: + 'New column type (optional for update_column). Types: string, number, boolean, date, json', + }, + offset: { + type: 'number', + description: 'Number of rows to skip (optional for query_rows, default 0)', + }, + outputFormat: { + type: 'string', + description: + 'Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.', + enum: ['json', 'csv', 'txt', 'md', 'html'], + }, + outputPath: { + type: 'string', + description: + 'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like "files/export.csv" — nested paths are not supported.', + }, + rowId: { + type: 'string', + description: 'Row ID (required for get_row, update_row, delete_row)', + }, + rowIds: { + type: 'array', + description: 'Array of row IDs to delete (for batch_delete_rows)', + }, + rows: { + type: 'array', + description: 'Array of row data objects (required for batch_insert_rows)', + }, + schema: { + type: 'object', + description: + "Table schema with columns array (required for 'create'). Each column: { name, type, unique? 
}", + }, + sort: { + type: 'object', + description: + "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)", + }, + tableId: { + type: 'string', + description: + "Table ID (required for most operations except 'create' and batch 'delete')", + }, + tableIds: { + type: 'array', + description: 'Array of table IDs (for batch delete)', + items: { type: 'string' }, + }, + unique: { + type: 'boolean', + description: 'Set column unique constraint (optional for update_column)', + }, + updates: { + type: 'array', + description: + 'Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)', + }, + values: { + type: 'object', + description: + 'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)', + }, + }, + }, + operation: { + type: 'string', + description: 'The operation to perform', + enum: [ + 'create', + 'create_from_file', + 'import_file', + 'get', + 'get_schema', + 'delete', + 'insert_row', + 'batch_insert_rows', + 'get_row', + 'query_rows', + 'update_row', + 'delete_row', + 'update_rows_by_filter', + 'delete_rows_by_filter', + 'batch_update_rows', + 'batch_delete_rows', + 'add_column', + 'rename_column', + 'delete_column', + 'update_column', + ], + }, + }, + required: ['operation', 'args'], + }, + resultSchema: { + type: 'object', + properties: { + data: { type: 'object', description: 'Operation-specific result payload.' }, + message: { type: 'string', description: 'Human-readable outcome summary.' }, + success: { type: 'boolean', description: 'Whether the operation succeeded.' 
}, + }, + required: ['success', 'message'], + }, requiresConfirmation: true, -}; +} export const Workflow: ToolCatalogEntry = { - id: "workflow", - name: "workflow", - route: "subagent", - mode: "async", - parameters: {"type":"object"}, - subagentId: "workflow", + id: 'workflow', + name: 'workflow', + route: 'subagent', + mode: 'async', + parameters: { type: 'object' }, + subagentId: 'workflow', internal: true, -}; +} export const WorkspaceFile: ToolCatalogEntry = { - id: "workspace_file", - name: "workspace_file", - route: "sim", - mode: "async", - parameters: {"type":"object","properties":{"operation":{"type":"string","description":"The file operation to perform.","enum":["append","update","patch"]},"target":{"type":"object","description":"Explicit file target. Use kind=file_id + fileId for existing files.","properties":{"fileId":{"type":"string","description":"Canonical existing workspace file ID. Required when target.kind=file_id."},"fileName":{"type":"string","description":"Plain workspace filename including extension, e.g. \"main.py\" or \"report.docx\". Required when target.kind=new_file."},"kind":{"type":"string","description":"How the file target is identified.","enum":["new_file","file_id"]}},"required":["kind"]},"title":{"type":"string","description":"Required short UI label for this content unit, e.g. \"Chapter 1\", \"Slide 3\", or \"Fix footer spacing\"."},"contentType":{"type":"string","description":"Optional MIME type override. Usually omit and let the system infer from the target file extension.","enum":["text/markdown","text/html","text/plain","application/json","text/csv","application/vnd.openxmlformats-officedocument.presentationml.presentation","application/vnd.openxmlformats-officedocument.wordprocessingml.document","application/pdf"]},"edit":{"type":"object","description":"Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. 
The actual replacement/insert content is provided via the paired edit_content tool call.","properties":{"after_anchor":{"type":"string","description":"Boundary line kept after inserted replacement content. Required for mode=replace_between."},"anchor":{"type":"string","description":"Anchor line after which new content is inserted. Required for mode=insert_after."},"before_anchor":{"type":"string","description":"Boundary line kept before inserted replacement content. Required for mode=replace_between."},"end_anchor":{"type":"string","description":"First line to keep after deletion. Required for mode=delete_between."},"mode":{"type":"string","description":"Anchored edit mode when strategy=anchored.","enum":["replace_between","insert_after","delete_between"]},"occurrence":{"type":"number","description":"1-based occurrence for repeated anchor lines. Optional; defaults to 1."},"replaceAll":{"type":"boolean","description":"When true and strategy=search_replace, replace every match instead of requiring a unique single match."},"search":{"type":"string","description":"Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true."},"start_anchor":{"type":"string","description":"First line to delete. Required for mode=delete_between."},"strategy":{"type":"string","description":"Patch strategy.","enum":["search_replace","anchored"]}}},"newName":{"type":"string","description":"New file name for rename. 
Must be a plain workspace filename like \"main.py\"."}},"required":["operation","target","title"]}, - resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Optional operation metadata such as file id, file name, size, and content type."},"message":{"type":"string","description":"Human-readable summary of the outcome."},"success":{"type":"boolean","description":"Whether the file operation succeeded."}},"required":["success","message"]}, - requiredPermission: "write", -}; + id: 'workspace_file', + name: 'workspace_file', + route: 'sim', + mode: 'async', + parameters: { + type: 'object', + properties: { + operation: { + type: 'string', + description: 'The file operation to perform.', + enum: ['append', 'update', 'patch'], + }, + target: { + type: 'object', + description: 'Explicit file target. Use kind=file_id + fileId for existing files.', + properties: { + fileId: { + type: 'string', + description: 'Canonical existing workspace file ID. Required when target.kind=file_id.', + }, + fileName: { + type: 'string', + description: + 'Plain workspace filename including extension, e.g. "main.py" or "report.docx". Required when target.kind=new_file.', + }, + kind: { + type: 'string', + description: 'How the file target is identified.', + enum: ['new_file', 'file_id'], + }, + }, + required: ['kind'], + }, + title: { + type: 'string', + description: + 'Required short UI label for this content unit, e.g. "Chapter 1", "Slide 3", or "Fix footer spacing".', + }, + contentType: { + type: 'string', + description: + 'Optional MIME type override. Usually omit and let the system infer from the target file extension.', + enum: [ + 'text/markdown', + 'text/html', + 'text/plain', + 'application/json', + 'text/csv', + 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + 'application/pdf', + ], + }, + edit: { + type: 'object', + description: + 'Patch metadata. 
Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.', + properties: { + after_anchor: { + type: 'string', + description: + 'Boundary line kept after inserted replacement content. Required for mode=replace_between.', + }, + anchor: { + type: 'string', + description: + 'Anchor line after which new content is inserted. Required for mode=insert_after.', + }, + before_anchor: { + type: 'string', + description: + 'Boundary line kept before inserted replacement content. Required for mode=replace_between.', + }, + end_anchor: { + type: 'string', + description: 'First line to keep after deletion. Required for mode=delete_between.', + }, + mode: { + type: 'string', + description: 'Anchored edit mode when strategy=anchored.', + enum: ['replace_between', 'insert_after', 'delete_between'], + }, + occurrence: { + type: 'number', + description: '1-based occurrence for repeated anchor lines. Optional; defaults to 1.', + }, + replaceAll: { + type: 'boolean', + description: + 'When true and strategy=search_replace, replace every match instead of requiring a unique single match.', + }, + search: { + type: 'string', + description: + 'Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true.', + }, + start_anchor: { + type: 'string', + description: 'First line to delete. Required for mode=delete_between.', + }, + strategy: { + type: 'string', + description: 'Patch strategy.', + enum: ['search_replace', 'anchored'], + }, + }, + }, + newName: { + type: 'string', + description: 'New file name for rename. 
Must be a plain workspace filename like "main.py".', + }, + }, + required: ['operation', 'target', 'title'], + }, + resultSchema: { + type: 'object', + properties: { + data: { + type: 'object', + description: + 'Optional operation metadata such as file id, file name, size, and content type.', + }, + message: { type: 'string', description: 'Human-readable summary of the outcome.' }, + success: { type: 'boolean', description: 'Whether the file operation succeeded.' }, + }, + required: ['success', 'message'], + }, + requiredPermission: 'write', +} export const KnowledgeBaseOperation = { - create: "create", - get: "get", - query: "query", - addFile: "add_file", - update: "update", - delete: "delete", - deleteDocument: "delete_document", - updateDocument: "update_document", - listTags: "list_tags", - createTag: "create_tag", - updateTag: "update_tag", - deleteTag: "delete_tag", - getTagUsage: "get_tag_usage", - addConnector: "add_connector", - updateConnector: "update_connector", - deleteConnector: "delete_connector", - syncConnector: "sync_connector", -} as const; - -export type KnowledgeBaseOperation = (typeof KnowledgeBaseOperation)[keyof typeof KnowledgeBaseOperation]; + create: 'create', + get: 'get', + query: 'query', + addFile: 'add_file', + update: 'update', + delete: 'delete', + deleteDocument: 'delete_document', + updateDocument: 'update_document', + listTags: 'list_tags', + createTag: 'create_tag', + updateTag: 'update_tag', + deleteTag: 'delete_tag', + getTagUsage: 'get_tag_usage', + addConnector: 'add_connector', + updateConnector: 'update_connector', + deleteConnector: 'delete_connector', + syncConnector: 'sync_connector', +} as const + +export type KnowledgeBaseOperation = + (typeof KnowledgeBaseOperation)[keyof typeof KnowledgeBaseOperation] export const KnowledgeBaseOperationValues = [ KnowledgeBaseOperation.create, @@ -854,45 +3123,47 @@ export const KnowledgeBaseOperationValues = [ KnowledgeBaseOperation.updateConnector, 
KnowledgeBaseOperation.deleteConnector, KnowledgeBaseOperation.syncConnector, -] as const; +] as const export const ManageCredentialOperation = { - rename: "rename", - delete: "delete", -} as const; + rename: 'rename', + delete: 'delete', +} as const -export type ManageCredentialOperation = (typeof ManageCredentialOperation)[keyof typeof ManageCredentialOperation]; +export type ManageCredentialOperation = + (typeof ManageCredentialOperation)[keyof typeof ManageCredentialOperation] export const ManageCredentialOperationValues = [ ManageCredentialOperation.rename, ManageCredentialOperation.delete, -] as const; +] as const export const ManageCustomToolOperation = { - add: "add", - edit: "edit", - delete: "delete", - list: "list", -} as const; + add: 'add', + edit: 'edit', + delete: 'delete', + list: 'list', +} as const -export type ManageCustomToolOperation = (typeof ManageCustomToolOperation)[keyof typeof ManageCustomToolOperation]; +export type ManageCustomToolOperation = + (typeof ManageCustomToolOperation)[keyof typeof ManageCustomToolOperation] export const ManageCustomToolOperationValues = [ ManageCustomToolOperation.add, ManageCustomToolOperation.edit, ManageCustomToolOperation.delete, ManageCustomToolOperation.list, -] as const; +] as const export const ManageJobOperation = { - create: "create", - list: "list", - get: "get", - update: "update", - delete: "delete", -} as const; + create: 'create', + list: 'list', + get: 'get', + update: 'update', + delete: 'delete', +} as const -export type ManageJobOperation = (typeof ManageJobOperation)[keyof typeof ManageJobOperation]; +export type ManageJobOperation = (typeof ManageJobOperation)[keyof typeof ManageJobOperation] export const ManageJobOperationValues = [ ManageJobOperation.create, @@ -900,65 +3171,67 @@ export const ManageJobOperationValues = [ ManageJobOperation.get, ManageJobOperation.update, ManageJobOperation.delete, -] as const; +] as const export const ManageMcpToolOperation = { - add: "add", - edit: 
"edit", - delete: "delete", - list: "list", -} as const; + add: 'add', + edit: 'edit', + delete: 'delete', + list: 'list', +} as const -export type ManageMcpToolOperation = (typeof ManageMcpToolOperation)[keyof typeof ManageMcpToolOperation]; +export type ManageMcpToolOperation = + (typeof ManageMcpToolOperation)[keyof typeof ManageMcpToolOperation] export const ManageMcpToolOperationValues = [ ManageMcpToolOperation.add, ManageMcpToolOperation.edit, ManageMcpToolOperation.delete, ManageMcpToolOperation.list, -] as const; +] as const export const ManageSkillOperation = { - add: "add", - edit: "edit", - delete: "delete", - list: "list", -} as const; + add: 'add', + edit: 'edit', + delete: 'delete', + list: 'list', +} as const -export type ManageSkillOperation = (typeof ManageSkillOperation)[keyof typeof ManageSkillOperation]; +export type ManageSkillOperation = (typeof ManageSkillOperation)[keyof typeof ManageSkillOperation] export const ManageSkillOperationValues = [ ManageSkillOperation.add, ManageSkillOperation.edit, ManageSkillOperation.delete, ManageSkillOperation.list, -] as const; +] as const export const MaterializeFileOperation = { - save: "save", - import: "import", - table: "table", - knowledgeBase: "knowledge_base", -} as const; + save: 'save', + import: 'import', + table: 'table', + knowledgeBase: 'knowledge_base', +} as const -export type MaterializeFileOperation = (typeof MaterializeFileOperation)[keyof typeof MaterializeFileOperation]; +export type MaterializeFileOperation = + (typeof MaterializeFileOperation)[keyof typeof MaterializeFileOperation] export const MaterializeFileOperationValues = [ MaterializeFileOperation.save, MaterializeFileOperation.import, MaterializeFileOperation.table, MaterializeFileOperation.knowledgeBase, -] as const; +] as const export const UserMemoryOperation = { - add: "add", - search: "search", - delete: "delete", - correct: "correct", - list: "list", -} as const; + add: 'add', + search: 'search', + delete: 'delete', + 
correct: 'correct', + list: 'list', +} as const -export type UserMemoryOperation = (typeof UserMemoryOperation)[keyof typeof UserMemoryOperation]; +export type UserMemoryOperation = (typeof UserMemoryOperation)[keyof typeof UserMemoryOperation] export const UserMemoryOperationValues = [ UserMemoryOperation.add, @@ -966,32 +3239,32 @@ export const UserMemoryOperationValues = [ UserMemoryOperation.delete, UserMemoryOperation.correct, UserMemoryOperation.list, -] as const; +] as const export const UserTableOperation = { - create: "create", - createFromFile: "create_from_file", - importFile: "import_file", - get: "get", - getSchema: "get_schema", - delete: "delete", - insertRow: "insert_row", - batchInsertRows: "batch_insert_rows", - getRow: "get_row", - queryRows: "query_rows", - updateRow: "update_row", - deleteRow: "delete_row", - updateRowsByFilter: "update_rows_by_filter", - deleteRowsByFilter: "delete_rows_by_filter", - batchUpdateRows: "batch_update_rows", - batchDeleteRows: "batch_delete_rows", - addColumn: "add_column", - renameColumn: "rename_column", - deleteColumn: "delete_column", - updateColumn: "update_column", -} as const; - -export type UserTableOperation = (typeof UserTableOperation)[keyof typeof UserTableOperation]; + create: 'create', + createFromFile: 'create_from_file', + importFile: 'import_file', + get: 'get', + getSchema: 'get_schema', + delete: 'delete', + insertRow: 'insert_row', + batchInsertRows: 'batch_insert_rows', + getRow: 'get_row', + queryRows: 'query_rows', + updateRow: 'update_row', + deleteRow: 'delete_row', + updateRowsByFilter: 'update_rows_by_filter', + deleteRowsByFilter: 'delete_rows_by_filter', + batchUpdateRows: 'batch_update_rows', + batchDeleteRows: 'batch_delete_rows', + addColumn: 'add_column', + renameColumn: 'rename_column', + deleteColumn: 'delete_column', + updateColumn: 'update_column', +} as const + +export type UserTableOperation = (typeof UserTableOperation)[keyof typeof UserTableOperation] export const 
UserTableOperationValues = [ UserTableOperation.create, @@ -1014,21 +3287,22 @@ export const UserTableOperationValues = [ UserTableOperation.renameColumn, UserTableOperation.deleteColumn, UserTableOperation.updateColumn, -] as const; +] as const export const WorkspaceFileOperation = { - append: "append", - update: "update", - patch: "patch", -} as const; + append: 'append', + update: 'update', + patch: 'patch', +} as const -export type WorkspaceFileOperation = (typeof WorkspaceFileOperation)[keyof typeof WorkspaceFileOperation]; +export type WorkspaceFileOperation = + (typeof WorkspaceFileOperation)[keyof typeof WorkspaceFileOperation] export const WorkspaceFileOperationValues = [ WorkspaceFileOperation.append, WorkspaceFileOperation.update, WorkspaceFileOperation.patch, -] as const; +] as const export const TOOL_CATALOG: Record = { [Agent.id]: Agent, @@ -1118,4 +3392,4 @@ export const TOOL_CATALOG: Record = { [UserTable.id]: UserTable, [Workflow.id]: Workflow, [WorkspaceFile.id]: WorkspaceFile, -}; +} diff --git a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts index 9d938ce53d7..78e624c8473 100644 --- a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts +++ b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts @@ -5,3125 +5,2899 @@ export type JsonSchema = unknown export interface ToolRuntimeSchemaEntry { - parameters?: JsonSchema; - resultSchema?: JsonSchema; + parameters?: JsonSchema + resultSchema?: JsonSchema } export const TOOL_RUNTIME_SCHEMAS: Record = { - ["agent"]: { + agent: { parameters: { - "properties": { - "request": { - "description": "What tool/skill/MCP action is needed.", - "type": "string" - } + properties: { + request: { + description: 'What tool/skill/MCP action is needed.', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["auth"]: { + auth: { parameters: { - "properties": { - "request": { 
- "description": "What authentication/credential action is needed.", - "type": "string" - } + properties: { + request: { + description: 'What authentication/credential action is needed.', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["check_deployment_status"]: { + check_deployment_status: { parameters: { - "type": "object", - "properties": { - "workflowId": { - "type": "string", - "description": "Workflow ID to check (defaults to current workflow)" - } - } + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Workflow ID to check (defaults to current workflow)', + }, + }, }, resultSchema: undefined, }, - ["complete_job"]: { + complete_job: { parameters: { - "type": "object", - "properties": { - "jobId": { - "type": "string", - "description": "The ID of the job to mark as completed." - } + type: 'object', + properties: { + jobId: { + type: 'string', + description: 'The ID of the job to mark as completed.', + }, }, - "required": [ - "jobId" - ] + required: ['jobId'], }, resultSchema: undefined, }, - ["context_write"]: { + context_write: { parameters: { - "type": "object", - "properties": { - "content": { - "type": "string", - "description": "Full content to write to the file (replaces existing content)" + type: 'object', + properties: { + content: { + type: 'string', + description: 'Full content to write to the file (replaces existing content)', + }, + file_path: { + type: 'string', + description: "Path of the file to write (e.g. 'SESSION.md')", }, - "file_path": { - "type": "string", - "description": "Path of the file to write (e.g. 
'SESSION.md')" - } }, - "required": [ - "file_path", - "content" - ] + required: ['file_path', 'content'], }, resultSchema: undefined, }, - ["crawl_website"]: { + crawl_website: { parameters: { - "type": "object", - "properties": { - "exclude_paths": { - "type": "array", - "description": "Skip URLs matching these patterns", - "items": { - "type": "string" - } + type: 'object', + properties: { + exclude_paths: { + type: 'array', + description: 'Skip URLs matching these patterns', + items: { + type: 'string', + }, + }, + include_paths: { + type: 'array', + description: 'Only crawl URLs matching these patterns', + items: { + type: 'string', + }, }, - "include_paths": { - "type": "array", - "description": "Only crawl URLs matching these patterns", - "items": { - "type": "string" - } + limit: { + type: 'number', + description: 'Maximum pages to crawl (default 10, max 50)', }, - "limit": { - "type": "number", - "description": "Maximum pages to crawl (default 10, max 50)" + max_depth: { + type: 'number', + description: 'How deep to follow links (default 2)', }, - "max_depth": { - "type": "number", - "description": "How deep to follow links (default 2)" + url: { + type: 'string', + description: 'Starting URL to crawl from', }, - "url": { - "type": "string", - "description": "Starting URL to crawl from" - } }, - "required": [ - "url" - ] + required: ['url'], }, resultSchema: undefined, }, - ["create_file"]: { + create_file: { parameters: { - "type": "object", - "properties": { - "contentType": { - "type": "string", - "description": "Optional MIME type override. Usually omit and let the system infer from the file extension." + type: 'object', + properties: { + contentType: { + type: 'string', + description: + 'Optional MIME type override. Usually omit and let the system infer from the file extension.', + }, + fileName: { + type: 'string', + description: + 'Plain workspace filename including extension, e.g. "main.py" or "report.md". 
Must not contain slashes.', }, - "fileName": { - "type": "string", - "description": "Plain workspace filename including extension, e.g. \"main.py\" or \"report.md\". Must not contain slashes." - } }, - "required": [ - "fileName" - ] + required: ['fileName'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - "description": "Contains id (the fileId) and name." + type: 'object', + properties: { + data: { + type: 'object', + description: 'Contains id (the fileId) and name.', + }, + message: { + type: 'string', + description: 'Human-readable outcome.', }, - "message": { - "type": "string", - "description": "Human-readable outcome." + success: { + type: 'boolean', + description: 'Whether the file was created.', }, - "success": { - "type": "boolean", - "description": "Whether the file was created." - } }, - "required": [ - "success", - "message" - ] + required: ['success', 'message'], }, }, - ["create_folder"]: { + create_folder: { parameters: { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Folder name." + type: 'object', + properties: { + name: { + type: 'string', + description: 'Folder name.', + }, + parentId: { + type: 'string', + description: 'Optional parent folder ID.', }, - "parentId": { - "type": "string", - "description": "Optional parent folder ID." + workspaceId: { + type: 'string', + description: 'Optional workspace ID.', }, - "workspaceId": { - "type": "string", - "description": "Optional workspace ID." - } }, - "required": [ - "name" - ] + required: ['name'], }, resultSchema: undefined, }, - ["create_job"]: { + create_job: { parameters: { - "type": "object", - "properties": { - "cron": { - "type": "string", - "description": "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs." 
+ type: 'object', + properties: { + cron: { + type: 'string', + description: + "Cron expression for recurring jobs (e.g., '*/5 * * * *' for every 5 minutes, '0 9 * * *' for daily at 9 AM). Omit for one-time jobs.", }, - "lifecycle": { - "type": "string", - "description": "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", - "enum": [ - "persistent", - "until_complete" - ] + lifecycle: { + type: 'string', + description: + "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called after the success condition is met.", + enum: ['persistent', 'until_complete'], }, - "maxRuns": { - "type": "integer", - "description": "Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling." + maxRuns: { + type: 'integer', + description: + 'Maximum number of executions before the job auto-completes. Safety limit to prevent runaway polling.', }, - "prompt": { - "type": "string", - "description": "The prompt to execute when the job fires. This is sent to the Mothership as a user message." + prompt: { + type: 'string', + description: + 'The prompt to execute when the job fires. This is sent to the Mothership as a user message.', }, - "successCondition": { - "type": "string", - "description": "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email')." + successCondition: { + type: 'string', + description: + "What must happen for the job to be considered complete. Used with until_complete lifecycle (e.g., 'John has replied to the partnership email').", }, - "time": { - "type": "string", - "description": "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter." 
+ time: { + type: 'string', + description: + "ISO 8601 datetime for one-time execution or as the start time for a cron schedule (e.g., '2026-03-06T09:00:00'). Include timezone offset or use the timezone parameter.", }, - "timezone": { - "type": "string", - "description": "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC." + timezone: { + type: 'string', + description: + "IANA timezone for the schedule (e.g., 'America/New_York', 'Europe/London'). Defaults to UTC.", + }, + title: { + type: 'string', + description: + "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name.", }, - "title": { - "type": "string", - "description": "A short, descriptive title for the job (e.g., 'Email Poller', 'Daily Report'). Used as the display name." - } }, - "required": [ - "title", - "prompt" - ] + required: ['title', 'prompt'], }, resultSchema: undefined, }, - ["create_workflow"]: { + create_workflow: { parameters: { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "Optional workflow description." + type: 'object', + properties: { + description: { + type: 'string', + description: 'Optional workflow description.', + }, + folderId: { + type: 'string', + description: 'Optional folder ID.', }, - "folderId": { - "type": "string", - "description": "Optional folder ID." + name: { + type: 'string', + description: 'Workflow name.', }, - "name": { - "type": "string", - "description": "Workflow name." + workspaceId: { + type: 'string', + description: 'Optional workspace ID.', }, - "workspaceId": { - "type": "string", - "description": "Optional workspace ID." 
- } }, - "required": [ - "name" - ] + required: ['name'], }, resultSchema: undefined, }, - ["create_workspace_mcp_server"]: { + create_workspace_mcp_server: { parameters: { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "Optional description for the server" + type: 'object', + properties: { + description: { + type: 'string', + description: 'Optional description for the server', }, - "name": { - "type": "string", - "description": "Required: server name" + name: { + type: 'string', + description: 'Required: server name', + }, + workspaceId: { + type: 'string', + description: 'Workspace ID (defaults to current workspace)', }, - "workspaceId": { - "type": "string", - "description": "Workspace ID (defaults to current workspace)" - } }, - "required": [ - "name" - ] + required: ['name'], }, resultSchema: undefined, }, - ["debug"]: { + debug: { parameters: { - "properties": { - "context": { - "description": "Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.", - "type": "string" + properties: { + context: { + description: + 'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.', + type: 'string', + }, + request: { + description: + 'What to debug. Include error messages, block IDs, and any context about the failure.', + type: 'string', }, - "request": { - "description": "What to debug. 
Include error messages, block IDs, and any context about the failure.", - "type": "string" - } }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["delete_file"]: { + delete_file: { parameters: { - "type": "object", - "properties": { - "fileIds": { - "type": "array", - "description": "Canonical workspace file IDs of the files to delete.", - "items": { - "type": "string" - } - } + type: 'object', + properties: { + fileIds: { + type: 'array', + description: 'Canonical workspace file IDs of the files to delete.', + items: { + type: 'string', + }, + }, }, - "required": [ - "fileIds" - ] + required: ['fileIds'], }, resultSchema: { - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "Human-readable outcome." + type: 'object', + properties: { + message: { + type: 'string', + description: 'Human-readable outcome.', + }, + success: { + type: 'boolean', + description: 'Whether the delete succeeded.', }, - "success": { - "type": "boolean", - "description": "Whether the delete succeeded." 
- } }, - "required": [ - "success", - "message" - ] + required: ['success', 'message'], }, }, - ["delete_folder"]: { + delete_folder: { parameters: { - "type": "object", - "properties": { - "folderIds": { - "type": "array", - "description": "The folder IDs to delete.", - "items": { - "type": "string" - } - } + type: 'object', + properties: { + folderIds: { + type: 'array', + description: 'The folder IDs to delete.', + items: { + type: 'string', + }, + }, }, - "required": [ - "folderIds" - ] + required: ['folderIds'], }, resultSchema: undefined, }, - ["delete_workflow"]: { + delete_workflow: { parameters: { - "type": "object", - "properties": { - "workflowIds": { - "type": "array", - "description": "The workflow IDs to delete.", - "items": { - "type": "string" - } - } + type: 'object', + properties: { + workflowIds: { + type: 'array', + description: 'The workflow IDs to delete.', + items: { + type: 'string', + }, + }, }, - "required": [ - "workflowIds" - ] + required: ['workflowIds'], }, resultSchema: undefined, }, - ["delete_workspace_mcp_server"]: { + delete_workspace_mcp_server: { parameters: { - "type": "object", - "properties": { - "serverId": { - "type": "string", - "description": "Required: the MCP server ID to delete" - } + type: 'object', + properties: { + serverId: { + type: 'string', + description: 'Required: the MCP server ID to delete', + }, }, - "required": [ - "serverId" - ] + required: ['serverId'], }, resultSchema: undefined, }, - ["deploy"]: { + deploy: { parameters: { - "properties": { - "request": { - "description": "Detailed deployment instructions. Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).", - "type": "string" - } + properties: { + request: { + description: + 'Detailed deployment instructions. 
Include deployment type (api/chat) and ALL user-specified options: identifier, title, description, authType, password, allowedEmails, welcomeMessage, outputConfigs (block outputs to display).', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["deploy_api"]: { + deploy_api: { parameters: { - "type": "object", - "properties": { - "action": { - "type": "string", - "description": "Whether to deploy or undeploy the API endpoint", - "enum": [ - "deploy", - "undeploy" - ], - "default": "deploy" + type: 'object', + properties: { + action: { + type: 'string', + description: 'Whether to deploy or undeploy the API endpoint', + enum: ['deploy', 'undeploy'], + default: 'deploy', }, - "workflowId": { - "type": "string", - "description": "Workflow ID to deploy (required in workspace context)" - } - } + workflowId: { + type: 'string', + description: 'Workflow ID to deploy (required in workspace context)', + }, + }, }, resultSchema: { - "type": "object", - "properties": { - "apiEndpoint": { - "type": "string", - "description": "Canonical workflow execution endpoint." - }, - "baseUrl": { - "type": "string", - "description": "Base URL used to construct deployment URLs." - }, - "deployedAt": { - "type": "string", - "description": "Deployment timestamp when the workflow is deployed." - }, - "deploymentConfig": { - "type": "object", - "description": "Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details." - }, - "deploymentStatus": { - "type": "object", - "description": "Structured per-surface deployment status keyed by surface name, such as api." - }, - "deploymentType": { - "type": "string", - "description": "Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"." 
- }, - "examples": { - "type": "object", - "description": "Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling." - }, - "isDeployed": { - "type": "boolean", - "description": "Whether the workflow API is currently deployed after this tool call." - }, - "version": { - "type": "number", - "description": "Deployment version for the current API deployment." - }, - "workflowId": { - "type": "string", - "description": "Workflow ID that was deployed or undeployed." - } - }, - "required": [ - "workflowId", - "isDeployed", - "deploymentType", - "deploymentStatus", - "deploymentConfig", - "examples" - ] - }, - }, - ["deploy_chat"]: { - parameters: { - "type": "object", - "properties": { - "action": { - "type": "string", - "description": "Whether to deploy or undeploy the chat interface", - "enum": [ - "deploy", - "undeploy" - ], - "default": "deploy" - }, - "allowedEmails": { - "type": "array", - "description": "List of allowed emails/domains for email or SSO auth", - "items": { - "type": "string" - } - }, - "authType": { - "type": "string", - "description": "Authentication type: public, password, email, or sso", - "enum": [ - "public", - "password", - "email", - "sso" - ], - "default": "public" - }, - "description": { - "type": "string", - "description": "Optional description for the chat" - }, - "identifier": { - "type": "string", - "description": "URL slug for the chat (lowercase letters, numbers, hyphens only)" - }, - "outputConfigs": { - "type": "array", - "description": "Output configurations specifying which block outputs to display in chat", - "items": { - "type": "object", - "properties": { - "blockId": { - "type": "string", - "description": "The block UUID" + type: 'object', + properties: { + apiEndpoint: { + type: 'string', + description: 'Canonical workflow execution endpoint.', + }, + baseUrl: { + type: 'string', + description: 'Base URL used to construct deployment URLs.', + }, + deployedAt: 
{ + type: 'string', + description: 'Deployment timestamp when the workflow is deployed.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', + }, + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, such as api.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. For API deploys this includes curl examples for sync, stream, async, and polling.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the workflow API is currently deployed after this tool call.', + }, + version: { + type: 'number', + description: 'Deployment version for the current API deployment.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID that was deployed or undeployed.', + }, + }, + required: [ + 'workflowId', + 'isDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, + }, + deploy_chat: { + parameters: { + type: 'object', + properties: { + action: { + type: 'string', + description: 'Whether to deploy or undeploy the chat interface', + enum: ['deploy', 'undeploy'], + default: 'deploy', + }, + allowedEmails: { + type: 'array', + description: 'List of allowed emails/domains for email or SSO auth', + items: { + type: 'string', + }, + }, + authType: { + type: 'string', + description: 'Authentication type: public, password, email, or sso', + enum: ['public', 'password', 'email', 'sso'], + default: 'public', + }, + description: { + type: 'string', + description: 'Optional description for the chat', + }, + identifier: { + type: 'string', + description: 'URL slug for the chat (lowercase letters, numbers, 
hyphens only)', + }, + outputConfigs: { + type: 'array', + description: 'Output configurations specifying which block outputs to display in chat', + items: { + type: 'object', + properties: { + blockId: { + type: 'string', + description: 'The block UUID', + }, + path: { + type: 'string', + description: "The output path (e.g. 'response', 'response.content')", }, - "path": { - "type": "string", - "description": "The output path (e.g. 'response', 'response.content')" - } - }, - "required": [ - "blockId", - "path" - ] - } - }, - "password": { - "type": "string", - "description": "Password for password-protected chats" - }, - "title": { - "type": "string", - "description": "Display title for the chat interface" - }, - "welcomeMessage": { - "type": "string", - "description": "Welcome message shown to users" - }, - "workflowId": { - "type": "string", - "description": "Workflow ID to deploy (required in workspace context)" - } - } + }, + required: ['blockId', 'path'], + }, + }, + password: { + type: 'string', + description: 'Password for password-protected chats', + }, + title: { + type: 'string', + description: 'Display title for the chat interface', + }, + welcomeMessage: { + type: 'string', + description: 'Welcome message shown to users', + }, + workflowId: { + type: 'string', + description: 'Workflow ID to deploy (required in workspace context)', + }, + }, }, resultSchema: { - "type": "object", - "properties": { - "action": { - "type": "string", - "description": "Action performed by the tool, such as \"deploy\" or \"undeploy\"." - }, - "apiEndpoint": { - "type": "string", - "description": "Paired workflow execution endpoint used by the chat deployment." - }, - "baseUrl": { - "type": "string", - "description": "Base URL used to construct deployment URLs." - }, - "chatUrl": { - "type": "string", - "description": "Shareable chat URL when the chat surface is deployed." 
- }, - "deployedAt": { - "type": "string", - "description": "Deployment timestamp for the underlying workflow deployment." - }, - "deploymentConfig": { - "type": "object", - "description": "Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration." - }, - "deploymentStatus": { - "type": "object", - "description": "Structured per-surface deployment status keyed by surface name, including api and chat." - }, - "deploymentType": { - "type": "string", - "description": "Deployment surface this result describes. For deploy_chat this is always \"chat\"." - }, - "examples": { - "type": "object", - "description": "Invocation examples keyed by surface name. Includes chat access details and API curl examples." - }, - "identifier": { - "type": "string", - "description": "Chat identifier or slug." - }, - "isChatDeployed": { - "type": "boolean", - "description": "Whether the chat surface is deployed after this tool call." - }, - "isDeployed": { - "type": "boolean", - "description": "Whether the paired API surface remains deployed after this tool call." - }, - "success": { - "type": "boolean", - "description": "Whether the deploy_chat action completed successfully." - }, - "version": { - "type": "number", - "description": "Deployment version for the underlying workflow deployment." - }, - "workflowId": { - "type": "string", - "description": "Workflow ID associated with the chat deployment." 
- } - }, - "required": [ - "workflowId", - "success", - "action", - "isDeployed", - "isChatDeployed", - "deploymentType", - "deploymentStatus", - "deploymentConfig", - "examples" - ] - }, - }, - ["deploy_mcp"]: { - parameters: { - "type": "object", - "properties": { - "parameterDescriptions": { - "type": "array", - "description": "Array of parameter descriptions for the tool", - "items": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "Parameter description" + type: 'object', + properties: { + action: { + type: 'string', + description: 'Action performed by the tool, such as "deploy" or "undeploy".', + }, + apiEndpoint: { + type: 'string', + description: 'Paired workflow execution endpoint used by the chat deployment.', + }, + baseUrl: { + type: 'string', + description: 'Base URL used to construct deployment URLs.', + }, + chatUrl: { + type: 'string', + description: 'Shareable chat URL when the chat surface is deployed.', + }, + deployedAt: { + type: 'string', + description: 'Deployment timestamp for the underlying workflow deployment.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. Includes chat settings and the paired API invocation configuration.', + }, + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, including api and chat.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_chat this is always "chat".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. 
Includes chat access details and API curl examples.', + }, + identifier: { + type: 'string', + description: 'Chat identifier or slug.', + }, + isChatDeployed: { + type: 'boolean', + description: 'Whether the chat surface is deployed after this tool call.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the paired API surface remains deployed after this tool call.', + }, + success: { + type: 'boolean', + description: 'Whether the deploy_chat action completed successfully.', + }, + version: { + type: 'number', + description: 'Deployment version for the underlying workflow deployment.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID associated with the chat deployment.', + }, + }, + required: [ + 'workflowId', + 'success', + 'action', + 'isDeployed', + 'isChatDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, + }, + deploy_mcp: { + parameters: { + type: 'object', + properties: { + parameterDescriptions: { + type: 'array', + description: 'Array of parameter descriptions for the tool', + items: { + type: 'object', + properties: { + description: { + type: 'string', + description: 'Parameter description', + }, + name: { + type: 'string', + description: 'Parameter name', }, - "name": { - "type": "string", - "description": "Parameter name" - } - }, - "required": [ - "name", - "description" - ] - } - }, - "serverId": { - "type": "string", - "description": "Required: server ID from list_workspace_mcp_servers" - }, - "toolDescription": { - "type": "string", - "description": "Description for the MCP tool" - }, - "toolName": { - "type": "string", - "description": "Name for the MCP tool (defaults to workflow name)" - }, - "workflowId": { - "type": "string", - "description": "Workflow ID (defaults to active workflow)" - } - }, - "required": [ - "serverId" - ] + }, + required: ['name', 'description'], + }, + }, + serverId: { + type: 'string', + description: 'Required: server ID from 
list_workspace_mcp_servers', + }, + toolDescription: { + type: 'string', + description: 'Description for the MCP tool', + }, + toolName: { + type: 'string', + description: 'Name for the MCP tool (defaults to workflow name)', + }, + workflowId: { + type: 'string', + description: 'Workflow ID (defaults to active workflow)', + }, + }, + required: ['serverId'], }, resultSchema: { - "type": "object", - "properties": { - "action": { - "type": "string", - "description": "Action performed by the tool, such as \"deploy\" or \"undeploy\"." + type: 'object', + properties: { + action: { + type: 'string', + description: 'Action performed by the tool, such as "deploy" or "undeploy".', }, - "apiEndpoint": { - "type": "string", - "description": "Underlying workflow API endpoint associated with the MCP tool." + apiEndpoint: { + type: 'string', + description: 'Underlying workflow API endpoint associated with the MCP tool.', }, - "baseUrl": { - "type": "string", - "description": "Base URL used to construct deployment URLs." + baseUrl: { + type: 'string', + description: 'Base URL used to construct deployment URLs.', }, - "deploymentConfig": { - "type": "object", - "description": "Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details." + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. Includes MCP server, tool, auth, and parameter schema details.', }, - "deploymentStatus": { - "type": "object", - "description": "Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable." + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, including mcp and the underlying api surface when applicable.', }, - "deploymentType": { - "type": "string", - "description": "Deployment surface this result describes. 
For deploy_mcp this is always \"mcp\"." + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_mcp this is always "mcp".', }, - "examples": { - "type": "object", - "description": "Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients." + examples: { + type: 'object', + description: + 'Setup examples keyed by surface name. Includes ready-to-paste config snippets for supported MCP clients.', }, - "mcpServerUrl": { - "type": "string", - "description": "HTTP MCP server URL to configure in clients." + mcpServerUrl: { + type: 'string', + description: 'HTTP MCP server URL to configure in clients.', }, - "removed": { - "type": "boolean", - "description": "Whether the MCP deployment was removed during an undeploy action." + removed: { + type: 'boolean', + description: 'Whether the MCP deployment was removed during an undeploy action.', }, - "serverId": { - "type": "string", - "description": "Workspace MCP server ID." + serverId: { + type: 'string', + description: 'Workspace MCP server ID.', }, - "serverName": { - "type": "string", - "description": "Workspace MCP server name." + serverName: { + type: 'string', + description: 'Workspace MCP server name.', }, - "toolDescription": { - "type": "string", - "description": "MCP tool description exposed on the server." + toolDescription: { + type: 'string', + description: 'MCP tool description exposed on the server.', }, - "toolId": { - "type": "string", - "description": "MCP tool ID when deployed." + toolId: { + type: 'string', + description: 'MCP tool ID when deployed.', }, - "toolName": { - "type": "string", - "description": "MCP tool name exposed on the server." + toolName: { + type: 'string', + description: 'MCP tool name exposed on the server.', }, - "updated": { - "type": "boolean", - "description": "Whether an existing MCP tool deployment was updated instead of created." 
+ updated: { + type: 'boolean', + description: 'Whether an existing MCP tool deployment was updated instead of created.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID associated with the MCP deployment.', }, - "workflowId": { - "type": "string", - "description": "Workflow ID associated with the MCP deployment." - } }, - "required": [ - "deploymentType", - "deploymentStatus" - ] + required: ['deploymentType', 'deploymentStatus'], }, }, - ["download_to_workspace_file"]: { + download_to_workspace_file: { parameters: { - "type": "object", - "properties": { - "fileName": { - "type": "string", - "description": "Optional workspace file name to save as. If omitted, the name is inferred from the response or URL." + type: 'object', + properties: { + fileName: { + type: 'string', + description: + 'Optional workspace file name to save as. If omitted, the name is inferred from the response or URL.', + }, + url: { + type: 'string', + description: + 'Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg', }, - "url": { - "type": "string", - "description": "Direct URL of the file to download, such as an image CDN URL ending in .png or .jpg" - } }, - "required": [ - "url" - ] + required: ['url'], }, resultSchema: undefined, }, - ["edit_content"]: { + edit_content: { parameters: { - "type": "object", - "properties": { - "content": { - "type": "string", - "description": "The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. For patch with anchored: the insert/replacement text." - } + type: 'object', + properties: { + content: { + type: 'string', + description: + 'The text content to write. For append: text to append. For update: full replacement text. For patch with search_replace: the replacement text. 
For patch with anchored: the insert/replacement text.', + }, }, - "required": [ - "content" - ] + required: ['content'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - "description": "Optional operation metadata such as file id, file name, size, and content type." - }, - "message": { - "type": "string", - "description": "Human-readable summary of the outcome." - }, - "success": { - "type": "boolean", - "description": "Whether the content was applied successfully." - } - }, - "required": [ - "success", - "message" - ] - }, - }, - ["edit_workflow"]: { - parameters: { - "type": "object", - "properties": { - "operations": { - "type": "array", - "description": "Array of edit operations", - "items": { - "type": "object", - "properties": { - "block_id": { - "type": "string", - "description": "Block ID for the operation. For add operations, this will be the desired ID for the new block." + type: 'object', + properties: { + data: { + type: 'object', + description: + 'Optional operation metadata such as file id, file name, size, and content type.', + }, + message: { + type: 'string', + description: 'Human-readable summary of the outcome.', + }, + success: { + type: 'boolean', + description: 'Whether the content was applied successfully.', + }, + }, + required: ['success', 'message'], + }, + }, + edit_workflow: { + parameters: { + type: 'object', + properties: { + operations: { + type: 'array', + description: 'Array of edit operations', + items: { + type: 'object', + properties: { + block_id: { + type: 'string', + description: + 'Block ID for the operation. 
For add operations, this will be the desired ID for the new block.', }, - "operation_type": { - "type": "string", - "description": "Type of operation to perform", - "enum": [ - "add", - "edit", - "delete", - "insert_into_subflow", - "extract_from_subflow" - ] + operation_type: { + type: 'string', + description: 'Type of operation to perform', + enum: ['add', 'edit', 'delete', 'insert_into_subflow', 'extract_from_subflow'], }, - "params": { - "type": "object", - "description": "Parameters for the operation. \nFor edit: {\"inputs\": {\"temperature\": 0.5}} NOT {\"subBlocks\": {\"temperature\": {\"value\": 0.5}}}\nFor add: {\"type\": \"agent\", \"name\": \"My Agent\", \"inputs\": {\"model\": \"gpt-4o\"}}\nFor delete: {} (empty object)" - } - }, - "required": [ - "operation_type", - "block_id", - "params" - ] - } + params: { + type: 'object', + description: + 'Parameters for the operation. \nFor edit: {"inputs": {"temperature": 0.5}} NOT {"subBlocks": {"temperature": {"value": 0.5}}}\nFor add: {"type": "agent", "name": "My Agent", "inputs": {"model": "gpt-4o"}}\nFor delete: {} (empty object)', + }, + }, + required: ['operation_type', 'block_id', 'params'], + }, + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to edit. If not provided, uses the current workflow in context." - } }, - "required": [ - "operations" - ] + required: ['operations'], }, resultSchema: undefined, }, - ["file"]: { - parameters: { - "type": "object" - }, - resultSchema: undefined, - }, - ["function_execute"]: { - parameters: { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME." 
- }, - "inputFiles": { - "type": "array", - "description": "Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: [\"wf_123\"]", - "items": { - "type": "string" - } - }, - "inputTables": { - "type": "array", - "description": "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: [\"tbl_abc123\"]", - "items": { - "type": "string" - } - }, - "language": { - "type": "string", - "description": "Execution language.", - "enum": [ - "javascript", - "python", - "shell" - ] - }, - "outputFormat": { - "type": "string", - "description": "Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.", - "enum": [ - "json", - "csv", - "txt", - "md", - "html" - ] - }, - "outputMimeType": { - "type": "string", - "description": "MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. Omit for text files." - }, - "outputPath": { - "type": "string", - "description": "Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like \"files/result.json\" — nested paths are not supported." - }, - "outputSandboxPath": { - "type": "string", - "description": "Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath." - }, - "outputTable": { - "type": "string", - "description": "Table ID to overwrite with the code's return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. 
Example: \"tbl_abc123\"" - } - }, - "required": [ - "code" - ] - }, + file: { + parameters: { + type: 'object', + }, resultSchema: undefined, }, - ["generate_api_key"]: { - parameters: { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "A descriptive name for the API key (e.g., 'production-key', 'dev-testing')." - }, - "workspaceId": { - "type": "string", - "description": "Optional workspace ID. Defaults to user's default workspace." - } - }, - "required": [ - "name" - ] + function_execute: { + parameters: { + type: 'object', + properties: { + code: { + type: 'string', + description: + 'Code to execute. For JS: raw statements auto-wrapped in async context. For Python: full script. For shell: bash script with access to pre-installed CLI tools and workspace env vars as $VAR_NAME.', + }, + inputFiles: { + type: 'array', + description: + 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}. Example: ["wf_123"]', + items: { + type: 'string', + }, + }, + inputTables: { + type: 'array', + description: + 'Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Example: ["tbl_abc123"]', + items: { + type: 'string', + }, + }, + language: { + type: 'string', + description: 'Execution language.', + enum: ['javascript', 'python', 'shell'], + }, + outputFormat: { + type: 'string', + description: + 'Format for outputPath. Determines how the code result is serialized. If omitted, inferred from outputPath file extension.', + enum: ['json', 'csv', 'txt', 'md', 'html'], + }, + outputMimeType: { + type: 'string', + description: + 'MIME type for outputSandboxPath export. Required for binary files: image/png, image/jpeg, application/pdf, etc. 
Omit for text files.', + }, + outputPath: { + type: 'string', + description: + 'Pipe output directly to a NEW workspace file instead of returning in context. ALWAYS use this instead of a separate workspace_file write call. Use a flat path like "files/result.json" — nested paths are not supported.', + }, + outputSandboxPath: { + type: 'string', + description: + 'Path to a file created inside the sandbox that should be exported to the workspace. Use together with outputPath.', + }, + outputTable: { + type: 'string', + description: + 'Table ID to overwrite with the code\'s return value. Code MUST return an array of objects where keys match column names. All existing rows are replaced. Example: "tbl_abc123"', + }, + }, + required: ['code'], }, resultSchema: undefined, }, - ["generate_image"]: { + generate_api_key: { parameters: { - "type": "object", - "properties": { - "aspectRatio": { - "type": "string", - "description": "Aspect ratio for the generated image.", - "enum": [ - "1:1", - "16:9", - "9:16", - "4:3", - "3:4" - ] - }, - "fileName": { - "type": "string", - "description": "Output file name. Defaults to \"generated-image.png\". Workspace files are flat, so pass a plain file name, not a nested path." - }, - "overwriteFileId": { - "type": "string", - "description": "If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like \"image (1).png\". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")." - }, - "prompt": { - "type": "string", - "description": "Detailed text description of the image to generate, or editing instructions when used with editFileId." 
- }, - "referenceFileIds": { - "type": "array", - "description": "File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.", - "items": { - "type": "string" - } - } + type: 'object', + properties: { + name: { + type: 'string', + description: + "A descriptive name for the API key (e.g., 'production-key', 'dev-testing').", + }, + workspaceId: { + type: 'string', + description: "Optional workspace ID. Defaults to user's default workspace.", + }, }, - "required": [ - "prompt" - ] + required: ['name'], }, resultSchema: undefined, }, - ["generate_visualization"]: { + generate_image: { parameters: { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output." + type: 'object', + properties: { + aspectRatio: { + type: 'string', + description: 'Aspect ratio for the generated image.', + enum: ['1:1', '16:9', '9:16', '4:3', '3:4'], + }, + fileName: { + type: 'string', + description: + 'Output file name. Defaults to "generated-image.png". Workspace files are flat, so pass a plain file name, not a nested path.', }, - "fileName": { - "type": "string", - "description": "Output file name. Defaults to \"chart.png\". Workspace files are flat, so pass a plain file name, not a nested path." + overwriteFileId: { + type: 'string', + description: + 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. 
Use this when the user asks to update, refine, or redo a previously generated image so the existing chat resource stays current instead of creating a duplicate like "image (1).png". The file ID is returned by previous generate_image or generate_visualization calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', }, - "inputFiles": { - "type": "array", - "description": "Canonical workspace file IDs to mount in the sandbox. Discover IDs via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\"). Mounted path: /home/user/files/{fileId}/{originalName}.", - "items": { - "type": "string" - } + prompt: { + type: 'string', + description: + 'Detailed text description of the image to generate, or editing instructions when used with editFileId.', }, - "inputTables": { - "type": "array", - "description": "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", - "items": { - "type": "string" - } + referenceFileIds: { + type: 'array', + description: + 'File IDs of workspace images to include as context for the generation. All images are sent alongside the prompt. Use for: editing a single image (1 file), compositing multiple images together (2+ files), style transfer, face swapping, etc. Order matters — list the primary/base image first. When revising an existing image in place, pair the primary file ID here with overwriteFileId set to that same ID.', + items: { + type: 'string', + }, }, - "overwriteFileId": { - "type": "string", - "description": "If provided, overwrites the existing workspace file with this ID instead of creating a new file. Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like \"chart (1).png\". 
The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read(\"files/by-id/{fileId}/meta.json\")." - } }, - "required": [ - "code" - ] + required: ['prompt'], }, resultSchema: undefined, }, - ["get_block_outputs"]: { + generate_visualization: { parameters: { - "type": "object", - "properties": { - "blockIds": { - "type": "array", - "description": "Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.", - "items": { - "type": "string" - } + type: 'object', + properties: { + code: { + type: 'string', + description: + "Python code that generates a visualization using matplotlib. MUST call plt.savefig('/home/user/output.png', dpi=150, bbox_inches='tight') to produce output.", + }, + fileName: { + type: 'string', + description: + 'Output file name. Defaults to "chart.png". Workspace files are flat, so pass a plain file name, not a nested path.', + }, + inputFiles: { + type: 'array', + description: + 'Canonical workspace file IDs to mount in the sandbox. Discover IDs via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json"). Mounted path: /home/user/files/{fileId}/{originalName}.', + items: { + type: 'string', + }, + }, + inputTables: { + type: 'array', + description: + "Table IDs to mount as CSV files in the sandbox. Each table appears at /home/user/tables/{tableId}.csv with a header row. Read with pandas: pd.read_csv('/home/user/tables/tbl_xxx.csv')", + items: { + type: 'string', + }, }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." - } - } + overwriteFileId: { + type: 'string', + description: + 'If provided, overwrites the existing workspace file with this ID instead of creating a new file. 
Use this when the user asks to update, refine, or redo a previously generated chart so the existing chat resource stays current instead of creating a duplicate like "chart (1).png". The file ID is returned by previous generate_visualization or generate_image calls (fileId field), or can be found via read("files/by-id/{fileId}/meta.json").', + }, + }, + required: ['code'], }, resultSchema: undefined, }, - ["get_block_upstream_references"]: { + get_block_outputs: { parameters: { - "type": "object", - "properties": { - "blockIds": { - "type": "array", - "description": "Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.", - "items": { - "type": "string" - } + type: 'object', + properties: { + blockIds: { + type: 'array', + description: + 'Optional array of block UUIDs. If provided, returns outputs only for those blocks. If not provided, returns outputs for all blocks in the workflow.', + items: { + type: 'string', + }, + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." - } }, - "required": [ - "blockIds" - ] }, resultSchema: undefined, }, - ["get_deployed_workflow_state"]: { + get_block_upstream_references: { parameters: { - "type": "object", - "properties": { - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." - } - } + type: 'object', + properties: { + blockIds: { + type: 'array', + description: + 'Required array of block UUIDs (minimum 1). Returns what each block can reference based on its position in the workflow graph.', + items: { + type: 'string', + }, + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. 
If not provided, uses the current workflow in context.', + }, + }, + required: ['blockIds'], }, resultSchema: undefined, }, - ["get_deployment_version"]: { + get_deployed_workflow_state: { parameters: { - "type": "object", - "properties": { - "version": { - "type": "number", - "description": "The deployment version number" + type: 'object', + properties: { + workflowId: { + type: 'string', + description: + 'Optional workflow ID. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "The workflow ID" - } }, - "required": [ - "workflowId", - "version" - ] }, resultSchema: undefined, }, - ["get_execution_summary"]: { + get_deployment_version: { parameters: { - "type": "object", - "properties": { - "limit": { - "type": "number", - "description": "Max number of executions to return (default: 10, max: 20)." + type: 'object', + properties: { + version: { + type: 'number', + description: 'The deployment version number', }, - "status": { - "type": "string", - "description": "Filter by status: 'success', 'error', or 'all' (default: 'all').", - "enum": [ - "success", - "error", - "all" - ] + workflowId: { + type: 'string', + description: 'The workflow ID', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If omitted, returns executions across all workflows in the workspace." + }, + required: ['workflowId', 'version'], + }, + resultSchema: undefined, + }, + get_execution_summary: { + parameters: { + type: 'object', + properties: { + limit: { + type: 'number', + description: 'Max number of executions to return (default: 10, max: 20).', + }, + status: { + type: 'string', + description: "Filter by status: 'success', 'error', or 'all' (default: 'all').", + enum: ['success', 'error', 'all'], + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. 
If omitted, returns executions across all workflows in the workspace.', + }, + workspaceId: { + type: 'string', + description: 'Workspace ID to scope executions to.', }, - "workspaceId": { - "type": "string", - "description": "Workspace ID to scope executions to." - } }, - "required": [ - "workspaceId" - ] + required: ['workspaceId'], }, resultSchema: undefined, }, - ["get_job_logs"]: { + get_job_logs: { parameters: { - "type": "object", - "properties": { - "executionId": { - "type": "string", - "description": "Optional execution ID for a specific run." + type: 'object', + properties: { + executionId: { + type: 'string', + description: 'Optional execution ID for a specific run.', + }, + includeDetails: { + type: 'boolean', + description: 'Include tool calls, outputs, and cost details.', }, - "includeDetails": { - "type": "boolean", - "description": "Include tool calls, outputs, and cost details." + jobId: { + type: 'string', + description: 'The job (schedule) ID to get logs for.', }, - "jobId": { - "type": "string", - "description": "The job (schedule) ID to get logs for." 
+ limit: { + type: 'number', + description: 'Max number of entries (default: 3, max: 5)', }, - "limit": { - "type": "number", - "description": "Max number of entries (default: 3, max: 5)" - } }, - "required": [ - "jobId" - ] + required: ['jobId'], }, resultSchema: undefined, }, - ["get_page_contents"]: { + get_page_contents: { parameters: { - "type": "object", - "properties": { - "include_highlights": { - "type": "boolean", - "description": "Include key highlights (default false)" + type: 'object', + properties: { + include_highlights: { + type: 'boolean', + description: 'Include key highlights (default false)', }, - "include_summary": { - "type": "boolean", - "description": "Include AI-generated summary (default false)" + include_summary: { + type: 'boolean', + description: 'Include AI-generated summary (default false)', }, - "include_text": { - "type": "boolean", - "description": "Include full page text (default true)" + include_text: { + type: 'boolean', + description: 'Include full page text (default true)', + }, + urls: { + type: 'array', + description: 'URLs to get content from (max 10)', + items: { + type: 'string', + }, }, - "urls": { - "type": "array", - "description": "URLs to get content from (max 10)", - "items": { - "type": "string" - } - } }, - "required": [ - "urls" - ] + required: ['urls'], }, resultSchema: undefined, }, - ["get_platform_actions"]: { + get_platform_actions: { parameters: { - "type": "object", - "properties": {} + type: 'object', + properties: {}, }, resultSchema: undefined, }, - ["get_workflow_data"]: { + get_workflow_data: { parameters: { - "type": "object", - "properties": { - "data_type": { - "type": "string", - "description": "The type of workflow data to retrieve", - "enum": [ - "global_variables", - "custom_tools", - "mcp_tools", - "files" - ] + type: 'object', + properties: { + data_type: { + type: 'string', + description: 'The type of workflow data to retrieve', + enum: ['global_variables', 'custom_tools', 'mcp_tools', 
'files'], + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." - } }, - "required": [ - "data_type" - ] + required: ['data_type'], }, resultSchema: undefined, }, - ["get_workflow_logs"]: { + get_workflow_logs: { parameters: { - "type": "object", - "properties": { - "executionId": { - "type": "string", - "description": "Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first." + type: 'object', + properties: { + executionId: { + type: 'string', + description: + 'Optional execution ID to get logs for a specific execution. Use with get_execution_summary to find execution IDs first.', }, - "includeDetails": { - "type": "boolean", - "description": "Include detailed info" + includeDetails: { + type: 'boolean', + description: 'Include detailed info', }, - "limit": { - "type": "number", - "description": "Max number of entries (hard limit: 3)" + limit: { + type: 'number', + description: 'Max number of entries (hard limit: 3)', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." - } - } + workflowId: { + type: 'string', + description: + 'Optional workflow ID. If not provided, uses the current workflow in context.', + }, + }, }, resultSchema: undefined, }, - ["glob"]: { + glob: { parameters: { - "type": "object", - "properties": { - "pattern": { - "type": "string", - "description": "Glob pattern to match file paths. Supports * (any segment) and ** (any depth)." + type: 'object', + properties: { + pattern: { + type: 'string', + description: + 'Glob pattern to match file paths. 
Supports * (any segment) and ** (any depth).', + }, + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like "workflow configs" or "knowledge bases", not a full sentence like "Finding workflow configs".', }, - "toolTitle": { - "type": "string", - "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"workflow configs\" or \"knowledge bases\", not a full sentence like \"Finding workflow configs\"." - } }, - "required": [ - "pattern", - "toolTitle" - ] + required: ['pattern', 'toolTitle'], }, resultSchema: undefined, }, - ["grep"]: { + grep: { parameters: { - "type": "object", - "properties": { - "context": { - "type": "number", - "description": "Number of lines to show before and after each match. Only applies to output_mode 'content'." + type: 'object', + properties: { + context: { + type: 'number', + description: + "Number of lines to show before and after each match. Only applies to output_mode 'content'.", }, - "ignoreCase": { - "type": "boolean", - "description": "Case insensitive search (default false)." + ignoreCase: { + type: 'boolean', + description: 'Case insensitive search (default false).', }, - "lineNumbers": { - "type": "boolean", - "description": "Include line numbers in output (default true). Only applies to output_mode 'content'." + lineNumbers: { + type: 'boolean', + description: + "Include line numbers in output (default true). Only applies to output_mode 'content'.", }, - "maxResults": { - "type": "number", - "description": "Maximum number of matches to return (default 50)." 
+ maxResults: { + type: 'number', + description: 'Maximum number of matches to return (default 50).', }, - "output_mode": { - "type": "string", - "description": "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", - "enum": [ - "content", - "files_with_matches", - "count" - ] + output_mode: { + type: 'string', + description: + "Output mode: 'content' shows matching lines (default), 'files_with_matches' shows only file paths, 'count' shows match counts per file.", + enum: ['content', 'files_with_matches', 'count'], }, - "path": { - "type": "string", - "description": "Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/')." + path: { + type: 'string', + description: + "Optional path prefix to scope the search (e.g. 'workflows/', 'environment/', 'internal/', 'components/blocks/').", }, - "pattern": { - "type": "string", - "description": "Regex pattern to search for in file contents." + pattern: { + type: 'string', + description: 'Regex pattern to search for in file contents.', + }, + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like "Slack integrations" or "deployed workflows", not a full sentence like "Searching for Slack integrations".', }, - "toolTitle": { - "type": "string", - "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"Slack integrations\" or \"deployed workflows\", not a full sentence like \"Searching for Slack integrations\"." 
- } }, - "required": [ - "pattern", - "toolTitle" - ] + required: ['pattern', 'toolTitle'], }, resultSchema: undefined, }, - ["job"]: { + job: { parameters: { - "properties": { - "request": { - "description": "What job action is needed.", - "type": "string" - } + properties: { + request: { + description: 'What job action is needed.', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["knowledge"]: { + knowledge: { parameters: { - "properties": { - "request": { - "description": "What knowledge base action is needed.", - "type": "string" - } + properties: { + request: { + description: 'What knowledge base action is needed.', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" - }, - resultSchema: undefined, - }, - ["knowledge_base"]: { - parameters: { - "type": "object", - "properties": { - "args": { - "type": "object", - "description": "Arguments for the operation", - "properties": { - "apiKey": { - "type": "string", - "description": "API key for API-key-based connectors (required when connector auth mode is apiKey)" - }, - "chunkingConfig": { - "type": "object", - "description": "Chunking configuration (optional for 'create')", - "properties": { - "maxSize": { - "type": "number", - "description": "Maximum chunk size (100-4000, default: 1024)", - "default": 1024 + required: ['request'], + type: 'object', + }, + resultSchema: undefined, + }, + knowledge_base: { + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: 'Arguments for the operation', + properties: { + apiKey: { + type: 'string', + description: + 'API key for API-key-based connectors (required when connector auth mode is apiKey)', + }, + chunkingConfig: { + type: 'object', + description: "Chunking configuration (optional for 'create')", + properties: { + maxSize: { + type: 'number', + description: 'Maximum chunk size (100-4000, default: 
1024)', + default: 1024, }, - "minSize": { - "type": "number", - "description": "Minimum chunk size (1-2000, default: 1)", - "default": 1 + minSize: { + type: 'number', + description: 'Minimum chunk size (1-2000, default: 1)', + default: 1, }, - "overlap": { - "type": "number", - "description": "Overlap between chunks (0-500, default: 200)", - "default": 200 - } - } - }, - "connectorId": { - "type": "string", - "description": "Connector ID (required for update_connector, delete_connector, sync_connector)" - }, - "connectorStatus": { - "type": "string", - "description": "Connector status (optional for update_connector)", - "enum": [ - "active", - "paused" - ] - }, - "connectorType": { - "type": "string", - "description": "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). Read knowledgebases/connectors/{type}.json for the config schema." - }, - "credentialId": { - "type": "string", - "description": "OAuth credential ID from environment/credentials.json (required for OAuth connectors)" - }, - "description": { - "type": "string", - "description": "Description of the knowledge base (optional for 'create')" - }, - "disabledTagIds": { - "type": "array", - "description": "Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema." - }, - "documentId": { - "type": "string", - "description": "Document ID (required for update_document)" - }, - "documentIds": { - "type": "array", - "description": "Document IDs (for batch delete_document)", - "items": { - "type": "string" - } - }, - "enabled": { - "type": "boolean", - "description": "Enable/disable a document (optional for update_document)" - }, - "fileIds": { - "type": "array", - "description": "Canonical workspace file IDs to add as documents (for add_file). 
Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\").", - "items": { - "type": "string" - } - }, - "filename": { - "type": "string", - "description": "New filename for a document (optional for update_document)" - }, - "knowledgeBaseId": { - "type": "string", - "description": "Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)" - }, - "knowledgeBaseIds": { - "type": "array", - "description": "Knowledge base IDs (for batch delete)", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the knowledge base (required for 'create')" - }, - "query": { - "type": "string", - "description": "Search query text (required for 'query')" - }, - "sourceConfig": { - "type": "object", - "description": "Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json" - }, - "syncIntervalMinutes": { - "type": "number", - "description": "Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440", - "default": 1440 - }, - "tagDefinitionId": { - "type": "string", - "description": "Tag definition ID (required for update_tag, delete_tag)" - }, - "tagDisplayName": { - "type": "string", - "description": "Display name for the tag (required for create_tag, optional for update_tag)" - }, - "tagFieldType": { - "type": "string", - "description": "Field type: text, number, date, boolean (optional for create_tag, defaults to text)", - "enum": [ - "text", - "number", - "date", - "boolean" - ] - }, - "topK": { - "type": "number", - "description": "Number of results to return (1-50, default: 5)", - "default": 5 - }, - "workspaceId": { - "type": "string", - "description": "Workspace ID (required for 'create', optional filter for 'list')" - } - } - }, - "operation": { - "type": "string", - "description": "The operation to perform", - "enum": [ - "create", - "get", - "query", - "add_file", - "update", - "delete", - "delete_document", - "update_document", - "list_tags", - "create_tag", - "update_tag", - "delete_tag", - "get_tag_usage", - "add_connector", - "update_connector", - "delete_connector", - "sync_connector" - ] - } - }, - "required": [ - "operation", - "args" - ] + overlap: { + type: 'number', + description: 'Overlap between chunks (0-500, default: 200)', + default: 200, + }, + }, + }, + connectorId: { + type: 'string', + description: + 'Connector ID (required for update_connector, delete_connector, sync_connector)', + }, + connectorStatus: { + type: 'string', + description: 'Connector status (optional for update_connector)', + enum: ['active', 'paused'], + }, + connectorType: { + type: 'string', + description: + "Connector type from registry, e.g. 'confluence', 'google_drive', 'notion' (required for add_connector). 
Read knowledgebases/connectors/{type}.json for the config schema.", + }, + credentialId: { + type: 'string', + description: + 'OAuth credential ID from environment/credentials.json (required for OAuth connectors)', + }, + description: { + type: 'string', + description: "Description of the knowledge base (optional for 'create')", + }, + disabledTagIds: { + type: 'array', + description: + 'Tag definition IDs to opt out of (optional for add_connector). See tagDefinitions in the connector schema.', + }, + documentId: { + type: 'string', + description: 'Document ID (required for update_document)', + }, + documentIds: { + type: 'array', + description: 'Document IDs (for batch delete_document)', + items: { + type: 'string', + }, + }, + enabled: { + type: 'boolean', + description: 'Enable/disable a document (optional for update_document)', + }, + fileIds: { + type: 'array', + description: + 'Canonical workspace file IDs to add as documents (for add_file). Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', + items: { + type: 'string', + }, + }, + filename: { + type: 'string', + description: 'New filename for a document (optional for update_document)', + }, + knowledgeBaseId: { + type: 'string', + description: + 'Knowledge base ID (required for get, query, add_file, list_tags, create_tag, get_tag_usage)', + }, + knowledgeBaseIds: { + type: 'array', + description: 'Knowledge base IDs (for batch delete)', + items: { + type: 'string', + }, + }, + name: { + type: 'string', + description: "Name of the knowledge base (required for 'create')", + }, + query: { + type: 'string', + description: "Search query text (required for 'query')", + }, + sourceConfig: { + type: 'object', + description: + 'Connector-specific configuration matching the configFields in knowledgebases/connectors/{type}.json', + }, + syncIntervalMinutes: { + type: 'number', + description: + 'Sync interval in minutes: 60 (hourly), 360 (6h), 1440 (daily), 10080 (weekly), 0 (manual only). 
Default: 1440', + default: 1440, + }, + tagDefinitionId: { + type: 'string', + description: 'Tag definition ID (required for update_tag, delete_tag)', + }, + tagDisplayName: { + type: 'string', + description: + 'Display name for the tag (required for create_tag, optional for update_tag)', + }, + tagFieldType: { + type: 'string', + description: + 'Field type: text, number, date, boolean (optional for create_tag, defaults to text)', + enum: ['text', 'number', 'date', 'boolean'], + }, + topK: { + type: 'number', + description: 'Number of results to return (1-50, default: 5)', + default: 5, + }, + workspaceId: { + type: 'string', + description: "Workspace ID (required for 'create', optional filter for 'list')", + }, + }, + }, + operation: { + type: 'string', + description: 'The operation to perform', + enum: [ + 'create', + 'get', + 'query', + 'add_file', + 'update', + 'delete', + 'delete_document', + 'update_document', + 'list_tags', + 'create_tag', + 'update_tag', + 'delete_tag', + 'get_tag_usage', + 'add_connector', + 'update_connector', + 'delete_connector', + 'sync_connector', + ], + }, + }, + required: ['operation', 'args'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - "description": "Operation-specific result payload." + type: 'object', + properties: { + data: { + type: 'object', + description: 'Operation-specific result payload.', }, - "message": { - "type": "string", - "description": "Human-readable outcome summary." + message: { + type: 'string', + description: 'Human-readable outcome summary.', + }, + success: { + type: 'boolean', + description: 'Whether the operation succeeded.', }, - "success": { - "type": "boolean", - "description": "Whether the operation succeeded." 
- } }, - "required": [ - "success", - "message" - ] + required: ['success', 'message'], }, }, - ["list_folders"]: { + list_folders: { parameters: { - "type": "object", - "properties": { - "workspaceId": { - "type": "string", - "description": "Optional workspace ID to list folders for." - } - } + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Optional workspace ID to list folders for.', + }, + }, }, resultSchema: undefined, }, - ["list_user_workspaces"]: { + list_user_workspaces: { parameters: { - "type": "object", - "properties": {} + type: 'object', + properties: {}, }, resultSchema: undefined, }, - ["list_workspace_mcp_servers"]: { + list_workspace_mcp_servers: { parameters: { - "type": "object", - "properties": { - "workspaceId": { - "type": "string", - "description": "Workspace ID (defaults to current workspace)" - } - } + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Workspace ID (defaults to current workspace)', + }, + }, }, resultSchema: undefined, }, - ["manage_credential"]: { + manage_credential: { parameters: { - "type": "object", - "properties": { - "credentialId": { - "type": "string", - "description": "The credential ID (required for rename)" + type: 'object', + properties: { + credentialId: { + type: 'string', + description: 'The credential ID (required for rename)', }, - "credentialIds": { - "type": "array", - "description": "Array of credential IDs (for batch delete)", - "items": { - "type": "string" - } + credentialIds: { + type: 'array', + description: 'Array of credential IDs (for batch delete)', + items: { + type: 'string', + }, + }, + displayName: { + type: 'string', + description: 'New display name (required for rename)', }, - "displayName": { - "type": "string", - "description": "New display name (required for rename)" + operation: { + type: 'string', + description: 'The operation to perform', + enum: ['rename', 'delete'], }, - "operation": { - "type": "string", - 
"description": "The operation to perform", - "enum": [ - "rename", - "delete" - ] - } }, - "required": [ - "operation" - ] + required: ['operation'], }, resultSchema: undefined, }, - ["manage_custom_tool"]: { + manage_custom_tool: { parameters: { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces." + type: 'object', + properties: { + code: { + type: 'string', + description: + 'The JavaScript code that executes when the tool is called (required for add). Parameters from schema are available as variables. Function body only - no signature or wrapping braces.', }, - "operation": { - "type": "string", - "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", - "enum": [ - "add", - "edit", - "delete", - "list" - ] + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], }, - "schema": { - "type": "object", - "description": "The tool schema in OpenAI function calling format (required for add).", - "properties": { - "function": { - "type": "object", - "description": "The function definition", - "properties": { - "description": { - "type": "string", - "description": "What the function does" + schema: { + type: 'object', + description: 'The tool schema in OpenAI function calling format (required for add).', + properties: { + function: { + type: 'object', + description: 'The function definition', + properties: { + description: { + type: 'string', + description: 'What the function does', }, - "name": { - "type": "string", - "description": "The function name (camelCase)" + name: { + type: 'string', + description: 'The function name (camelCase)', }, - "parameters": { - "type": "object", - "description": "The function parameters schema", - 
"properties": { - "properties": { - "type": "object", - "description": "Parameter definitions as key-value pairs" + parameters: { + type: 'object', + description: 'The function parameters schema', + properties: { + properties: { + type: 'object', + description: 'Parameter definitions as key-value pairs', + }, + required: { + type: 'array', + description: 'Array of required parameter names', + items: { + type: 'string', + }, }, - "required": { - "type": "array", - "description": "Array of required parameter names", - "items": { - "type": "string" - } + type: { + type: 'string', + description: "Must be 'object'", }, - "type": { - "type": "string", - "description": "Must be 'object'" - } }, - "required": [ - "type", - "properties" - ] - } + required: ['type', 'properties'], + }, }, - "required": [ - "name", - "parameters" - ] - }, - "type": { - "type": "string", - "description": "Must be 'function'" - } + required: ['name', 'parameters'], + }, + type: { + type: 'string', + description: "Must be 'function'", + }, }, - "required": [ - "type", - "function" - ] - }, - "toolId": { - "type": "string", - "description": "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'." - }, - "toolIds": { - "type": "array", - "description": "Array of custom tool IDs (for batch delete)", - "items": { - "type": "string" - } - } - }, - "required": [ - "operation" - ] - }, - resultSchema: undefined, - }, - ["manage_job"]: { - parameters: { - "type": "object", - "properties": { - "args": { - "type": "object", - "description": "Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. 
For list: no args needed.", - "properties": { - "cron": { - "type": "string", - "description": "Cron expression for recurring jobs" - }, - "jobId": { - "type": "string", - "description": "Job ID (required for get, update)" - }, - "jobIds": { - "type": "array", - "description": "Array of job IDs (for batch delete)", - "items": { - "type": "string" - } - }, - "lifecycle": { - "type": "string", - "description": "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called." - }, - "maxRuns": { - "type": "integer", - "description": "Max executions before auto-completing. Safety limit." - }, - "prompt": { - "type": "string", - "description": "The prompt to execute when the job fires" - }, - "status": { - "type": "string", - "description": "Job status: active, paused" - }, - "successCondition": { - "type": "string", - "description": "What must happen for the job to be considered complete (until_complete lifecycle)." - }, - "time": { - "type": "string", - "description": "ISO 8601 datetime for one-time jobs or cron start time" - }, - "timezone": { - "type": "string", - "description": "IANA timezone (e.g. America/New_York). Defaults to UTC." - }, - "title": { - "type": "string", - "description": "Short descriptive title for the job (e.g. 'Email Poller')" - } - } - }, - "operation": { - "type": "string", - "description": "The operation to perform: create, list, get, update, delete", - "enum": [ - "create", - "list", - "get", - "update", - "delete" - ] - } - }, - "required": [ - "operation" - ] - }, - resultSchema: undefined, - }, - ["manage_mcp_tool"]: { - parameters: { - "type": "object", - "properties": { - "config": { - "type": "object", - "description": "Required for add and edit. 
The MCP server configuration.", - "properties": { - "enabled": { - "type": "boolean", - "description": "Whether the server is enabled (default: true)" - }, - "headers": { - "type": "object", - "description": "Optional HTTP headers to send with requests (key-value pairs)" - }, - "name": { - "type": "string", - "description": "Display name for the MCP server" - }, - "timeout": { - "type": "number", - "description": "Request timeout in milliseconds (default: 30000)" - }, - "transport": { - "type": "string", - "description": "Transport protocol: 'streamable-http' or 'sse'", - "enum": [ - "streamable-http", - "sse" - ], - "default": "streamable-http" - }, - "url": { - "type": "string", - "description": "The MCP server endpoint URL (required for add)" - } - } - }, - "operation": { - "type": "string", - "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", - "enum": [ - "add", - "edit", - "delete", - "list" - ] - }, - "serverId": { - "type": "string", - "description": "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'." - } - }, - "required": [ - "operation" - ] - }, - resultSchema: undefined, - }, - ["manage_skill"]: { - parameters: { - "type": "object", - "properties": { - "content": { - "type": "string", - "description": "Markdown instructions for the skill. Required for add, optional for edit." - }, - "description": { - "type": "string", - "description": "Short description of the skill. Required for add, optional for edit." - }, - "name": { - "type": "string", - "description": "Skill name in kebab-case (e.g. 'my-skill'). Required for add, optional for edit." - }, - "operation": { - "type": "string", - "description": "The operation to perform: 'add', 'edit', 'list', or 'delete'", - "enum": [ - "add", - "edit", - "delete", - "list" - ] - }, - "skillId": { - "type": "string", - "description": "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. 
DO NOT PROVIDE if operation is 'add' or 'list'." - } - }, - "required": [ - "operation" - ] - }, - resultSchema: undefined, - }, - ["materialize_file"]: { - parameters: { - "type": "object", - "properties": { - "fileNames": { - "type": "array", - "description": "The names of the uploaded files to materialize (e.g. [\"report.pdf\", \"data.csv\"])", - "items": { - "type": "string" - } - }, - "knowledgeBaseId": { - "type": "string", - "description": "ID of an existing knowledge base to add the file to (only used with operation \"knowledge_base\"). If omitted, a new KB is created." - }, - "operation": { - "type": "string", - "description": "What to do with the file. \"save\" promotes it to files/. \"import\" imports a workflow JSON. \"table\" converts CSV/TSV/JSON to a table. \"knowledge_base\" saves and adds to a KB. Defaults to \"save\".", - "enum": [ - "save", - "import", - "table", - "knowledge_base" - ], - "default": "save" + required: ['type', 'function'], + }, + toolId: { + type: 'string', + description: + "The ID of the custom tool (required for edit). Must be the exact toolId from the get_workflow_data custom tool response - do not guess or construct it. DO NOT PROVIDE THE TOOL ID IF THE OPERATION IS 'ADD'.", + }, + toolIds: { + type: 'array', + description: 'Array of custom tool IDs (for batch delete)', + items: { + type: 'string', + }, + }, + }, + required: ['operation'], + }, + resultSchema: undefined, + }, + manage_job: { + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: + 'Operation-specific arguments. For create: {title, prompt, cron?, time?, timezone?, lifecycle?, successCondition?, maxRuns?}. For get/delete: {jobId}. For update: {jobId, title?, prompt?, cron?, timezone?, status?, lifecycle?, successCondition?, maxRuns?}. 
For list: no args needed.', + properties: { + cron: { + type: 'string', + description: 'Cron expression for recurring jobs', + }, + jobId: { + type: 'string', + description: 'Job ID (required for get, update)', + }, + jobIds: { + type: 'array', + description: 'Array of job IDs (for batch delete)', + items: { + type: 'string', + }, + }, + lifecycle: { + type: 'string', + description: + "'persistent' (default) or 'until_complete'. Until_complete jobs stop when complete_job is called.", + }, + maxRuns: { + type: 'integer', + description: 'Max executions before auto-completing. Safety limit.', + }, + prompt: { + type: 'string', + description: 'The prompt to execute when the job fires', + }, + status: { + type: 'string', + description: 'Job status: active, paused', + }, + successCondition: { + type: 'string', + description: + 'What must happen for the job to be considered complete (until_complete lifecycle).', + }, + time: { + type: 'string', + description: 'ISO 8601 datetime for one-time jobs or cron start time', + }, + timezone: { + type: 'string', + description: 'IANA timezone (e.g. America/New_York). Defaults to UTC.', + }, + title: { + type: 'string', + description: "Short descriptive title for the job (e.g. 'Email Poller')", + }, + }, + }, + operation: { + type: 'string', + description: 'The operation to perform: create, list, get, update, delete', + enum: ['create', 'list', 'get', 'update', 'delete'], + }, + }, + required: ['operation'], + }, + resultSchema: undefined, + }, + manage_mcp_tool: { + parameters: { + type: 'object', + properties: { + config: { + type: 'object', + description: 'Required for add and edit. 
The MCP server configuration.', + properties: { + enabled: { + type: 'boolean', + description: 'Whether the server is enabled (default: true)', + }, + headers: { + type: 'object', + description: 'Optional HTTP headers to send with requests (key-value pairs)', + }, + name: { + type: 'string', + description: 'Display name for the MCP server', + }, + timeout: { + type: 'number', + description: 'Request timeout in milliseconds (default: 30000)', + }, + transport: { + type: 'string', + description: "Transport protocol: 'streamable-http' or 'sse'", + enum: ['streamable-http', 'sse'], + default: 'streamable-http', + }, + url: { + type: 'string', + description: 'The MCP server endpoint URL (required for add)', + }, + }, + }, + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], + }, + serverId: { + type: 'string', + description: + "Required for edit and delete. The database ID of the MCP server. DO NOT PROVIDE if operation is 'add' or 'list'.", }, - "tableName": { - "type": "string", - "description": "Custom name for the table (only used with operation \"table\"). Defaults to the file name without extension." - } }, - "required": [ - "fileNames" - ] + required: ['operation'], }, resultSchema: undefined, }, - ["move_folder"]: { + manage_skill: { parameters: { - "type": "object", - "properties": { - "folderId": { - "type": "string", - "description": "The folder ID to move." + type: 'object', + properties: { + content: { + type: 'string', + description: 'Markdown instructions for the skill. Required for add, optional for edit.', + }, + description: { + type: 'string', + description: 'Short description of the skill. Required for add, optional for edit.', + }, + name: { + type: 'string', + description: + "Skill name in kebab-case (e.g. 'my-skill'). 
Required for add, optional for edit.", + }, + operation: { + type: 'string', + description: "The operation to perform: 'add', 'edit', 'list', or 'delete'", + enum: ['add', 'edit', 'delete', 'list'], + }, + skillId: { + type: 'string', + description: + "The ID of the skill (required for edit/delete). Must be the exact ID from the VFS or list. DO NOT PROVIDE if operation is 'add' or 'list'.", }, - "parentId": { - "type": "string", - "description": "Target parent folder ID. Omit or pass empty string to move to workspace root." - } }, - "required": [ - "folderId" - ] + required: ['operation'], }, resultSchema: undefined, }, - ["move_workflow"]: { + materialize_file: { parameters: { - "type": "object", - "properties": { - "folderId": { - "type": "string", - "description": "Target folder ID. Omit or pass empty string to move to workspace root." + type: 'object', + properties: { + fileNames: { + type: 'array', + description: + 'The names of the uploaded files to materialize (e.g. ["report.pdf", "data.csv"])', + items: { + type: 'string', + }, + }, + knowledgeBaseId: { + type: 'string', + description: + 'ID of an existing knowledge base to add the file to (only used with operation "knowledge_base"). If omitted, a new KB is created.', + }, + operation: { + type: 'string', + description: + 'What to do with the file. "save" promotes it to files/. "import" imports a workflow JSON. "table" converts CSV/TSV/JSON to a table. "knowledge_base" saves and adds to a KB. Defaults to "save".', + enum: ['save', 'import', 'table', 'knowledge_base'], + default: 'save', + }, + tableName: { + type: 'string', + description: + 'Custom name for the table (only used with operation "table"). 
Defaults to the file name without extension.', }, - "workflowIds": { - "type": "array", - "description": "The workflow IDs to move.", - "items": { - "type": "string" - } - } }, - "required": [ - "workflowIds" - ] + required: ['fileNames'], }, resultSchema: undefined, }, - ["oauth_get_auth_link"]: { + move_folder: { parameters: { - "type": "object", - "properties": { - "providerName": { - "type": "string", - "description": "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')" - } + type: 'object', + properties: { + folderId: { + type: 'string', + description: 'The folder ID to move.', + }, + parentId: { + type: 'string', + description: + 'Target parent folder ID. Omit or pass empty string to move to workspace root.', + }, }, - "required": [ - "providerName" - ] + required: ['folderId'], }, resultSchema: undefined, }, - ["oauth_request_access"]: { + move_workflow: { parameters: { - "type": "object", - "properties": { - "providerName": { - "type": "string", - "description": "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')" - } + type: 'object', + properties: { + folderId: { + type: 'string', + description: 'Target folder ID. Omit or pass empty string to move to workspace root.', + }, + workflowIds: { + type: 'array', + description: 'The workflow IDs to move.', + items: { + type: 'string', + }, + }, }, - "required": [ - "providerName" - ] + required: ['workflowIds'], }, resultSchema: undefined, }, - ["open_resource"]: { + oauth_get_auth_link: { parameters: { - "type": "object", - "properties": { - "resources": { - "type": "array", - "description": "Array of resources to open. Each item must have type and id.", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The resource ID." 
+ type: 'object', + properties: { + providerName: { + type: 'string', + description: + "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar', 'GitHub')", + }, + }, + required: ['providerName'], + }, + resultSchema: undefined, + }, + oauth_request_access: { + parameters: { + type: 'object', + properties: { + providerName: { + type: 'string', + description: + "The name of the OAuth provider to connect (e.g., 'Slack', 'Gmail', 'Google Calendar')", + }, + }, + required: ['providerName'], + }, + resultSchema: undefined, + }, + open_resource: { + parameters: { + type: 'object', + properties: { + resources: { + type: 'array', + description: 'Array of resources to open. Each item must have type and id.', + items: { + type: 'object', + properties: { + id: { + type: 'string', + description: 'The resource ID.', + }, + type: { + type: 'string', + description: 'The resource type.', + enum: ['workflow', 'table', 'knowledgebase', 'file', 'log'], }, - "type": { - "type": "string", - "description": "The resource type.", - "enum": [ - "workflow", - "table", - "knowledgebase", - "file", - "log" - ] - } }, - "required": [ - "type", - "id" - ] - } - } + required: ['type', 'id'], + }, + }, }, - "required": [ - "resources" - ] + required: ['resources'], }, resultSchema: undefined, }, - ["read"]: { + read: { parameters: { - "type": "object", - "properties": { - "limit": { - "type": "number", - "description": "Maximum number of lines to read." + type: 'object', + properties: { + limit: { + type: 'number', + description: 'Maximum number of lines to read.', }, - "offset": { - "type": "number", - "description": "Line offset to start reading from (0-indexed)." + offset: { + type: 'number', + description: 'Line offset to start reading from (0-indexed).', }, - "outputTable": { - "type": "string", - "description": "Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. 
Example: \"tbl_abc123\"" + outputTable: { + type: 'string', + description: + 'Table ID to import the file contents into (CSV/JSON). All existing rows are replaced. Example: "tbl_abc123"', + }, + path: { + type: 'string', + description: + "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').", }, - "path": { - "type": "string", - "description": "Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json')." - } }, - "required": [ - "path" - ] + required: ['path'], }, resultSchema: undefined, }, - ["redeploy"]: { + redeploy: { parameters: { - "type": "object", - "properties": { - "workflowId": { - "type": "string", - "description": "Workflow ID to redeploy (required in workspace context)" - } - } + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Workflow ID to redeploy (required in workspace context)', + }, + }, }, resultSchema: { - "type": "object", - "properties": { - "apiEndpoint": { - "type": "string", - "description": "Canonical workflow execution endpoint." - }, - "baseUrl": { - "type": "string", - "description": "Base URL used to construct deployment URLs." - }, - "deployedAt": { - "type": "string", - "description": "Deployment timestamp when the workflow is deployed." - }, - "deploymentConfig": { - "type": "object", - "description": "Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details." - }, - "deploymentStatus": { - "type": "object", - "description": "Structured per-surface deployment status keyed by surface name, such as api." - }, - "deploymentType": { - "type": "string", - "description": "Deployment surface this result describes. For deploy_api and redeploy this is always \"api\"." - }, - "examples": { - "type": "object", - "description": "Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling." - }, - "isDeployed": { - "type": "boolean", - "description": "Whether the workflow API is currently deployed after this tool call." - }, - "version": { - "type": "number", - "description": "Deployment version for the current API deployment." - }, - "workflowId": { - "type": "string", - "description": "Workflow ID that was deployed or undeployed." - } - }, - "required": [ - "workflowId", - "isDeployed", - "deploymentType", - "deploymentStatus", - "deploymentConfig", - "examples" - ] - }, - }, - ["rename_file"]: { - parameters: { - "type": "object", - "properties": { - "fileId": { - "type": "string", - "description": "Canonical workspace file ID of the file to rename." - }, - "newName": { - "type": "string", - "description": "New filename including extension, e.g. \"draft_v2.md\". Must not contain slashes." - } - }, - "required": [ - "fileId", - "newName" - ] + type: 'object', + properties: { + apiEndpoint: { + type: 'string', + description: 'Canonical workflow execution endpoint.', + }, + baseUrl: { + type: 'string', + description: 'Base URL used to construct deployment URLs.', + }, + deployedAt: { + type: 'string', + description: 'Deployment timestamp when the workflow is deployed.', + }, + deploymentConfig: { + type: 'object', + description: + 'Structured deployment configuration keyed by surface name. For API deploys this includes endpoint, auth, and sync/stream/async mode details.', + }, + deploymentStatus: { + type: 'object', + description: + 'Structured per-surface deployment status keyed by surface name, such as api.', + }, + deploymentType: { + type: 'string', + description: + 'Deployment surface this result describes. For deploy_api and redeploy this is always "api".', + }, + examples: { + type: 'object', + description: + 'Invocation examples keyed by surface name. 
For API deploys this includes curl examples for sync, stream, async, and polling.', + }, + isDeployed: { + type: 'boolean', + description: 'Whether the workflow API is currently deployed after this tool call.', + }, + version: { + type: 'number', + description: 'Deployment version for the current API deployment.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID that was deployed or undeployed.', + }, + }, + required: [ + 'workflowId', + 'isDeployed', + 'deploymentType', + 'deploymentStatus', + 'deploymentConfig', + 'examples', + ], + }, + }, + rename_file: { + parameters: { + type: 'object', + properties: { + fileId: { + type: 'string', + description: 'Canonical workspace file ID of the file to rename.', + }, + newName: { + type: 'string', + description: + 'New filename including extension, e.g. "draft_v2.md". Must not contain slashes.', + }, + }, + required: ['fileId', 'newName'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - "description": "Contains id and the new name." + type: 'object', + properties: { + data: { + type: 'object', + description: 'Contains id and the new name.', }, - "message": { - "type": "string", - "description": "Human-readable outcome." + message: { + type: 'string', + description: 'Human-readable outcome.', + }, + success: { + type: 'boolean', + description: 'Whether the rename succeeded.', }, - "success": { - "type": "boolean", - "description": "Whether the rename succeeded." - } }, - "required": [ - "success", - "message" - ] + required: ['success', 'message'], }, }, - ["rename_workflow"]: { + rename_workflow: { parameters: { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The new name for the workflow." 
+ type: 'object', + properties: { + name: { + type: 'string', + description: 'The new name for the workflow.', + }, + workflowId: { + type: 'string', + description: 'The workflow ID to rename.', }, - "workflowId": { - "type": "string", - "description": "The workflow ID to rename." - } }, - "required": [ - "workflowId", - "name" - ] + required: ['workflowId', 'name'], }, resultSchema: undefined, }, - ["research"]: { + research: { parameters: { - "properties": { - "topic": { - "description": "The topic to research.", - "type": "string" - } + properties: { + topic: { + description: 'The topic to research.', + type: 'string', + }, }, - "required": [ - "topic" - ], - "type": "object" + required: ['topic'], + type: 'object', }, resultSchema: undefined, }, - ["respond"]: { + respond: { parameters: { - "additionalProperties": true, - "properties": { - "output": { - "description": "The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.", - "type": "string" + additionalProperties: true, + properties: { + output: { + description: + 'The result — facts, status, VFS paths to persisted data, whatever the caller needs to act on.', + type: 'string', + }, + success: { + description: 'Whether the task completed successfully', + type: 'boolean', }, - "success": { - "description": "Whether the task completed successfully", - "type": "boolean" + type: { + description: 'Optional logical result type override', + type: 'string', }, - "type": { - "description": "Optional logical result type override", - "type": "string" - } }, - "required": [ - "output", - "success" - ], - "type": "object" + required: ['output', 'success'], + type: 'object', }, resultSchema: undefined, }, - ["restore_resource"]: { + restore_resource: { parameters: { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The canonical resource ID to restore." 
+ type: 'object', + properties: { + id: { + type: 'string', + description: 'The canonical resource ID to restore.', + }, + type: { + type: 'string', + description: 'The resource type to restore.', + enum: ['workflow', 'table', 'file', 'knowledgebase', 'folder'], }, - "type": { - "type": "string", - "description": "The resource type to restore.", - "enum": [ - "workflow", - "table", - "file", - "knowledgebase", - "folder" - ] - } }, - "required": [ - "type", - "id" - ] + required: ['type', 'id'], }, resultSchema: undefined, }, - ["revert_to_version"]: { + revert_to_version: { parameters: { - "type": "object", - "properties": { - "version": { - "type": "number", - "description": "The deployment version number to revert to" + type: 'object', + properties: { + version: { + type: 'number', + description: 'The deployment version number to revert to', + }, + workflowId: { + type: 'string', + description: 'The workflow ID', }, - "workflowId": { - "type": "string", - "description": "The workflow ID" - } }, - "required": [ - "workflowId", - "version" - ] + required: ['workflowId', 'version'], }, resultSchema: undefined, }, - ["run"]: { + run: { parameters: { - "properties": { - "context": { - "description": "Pre-gathered context: workflow state, block IDs, input requirements.", - "type": "string" + properties: { + context: { + description: 'Pre-gathered context: workflow state, block IDs, input requirements.', + type: 'string', + }, + request: { + description: 'What to run or what logs to check.', + type: 'string', }, - "request": { - "description": "What to run or what logs to check.", - "type": "string" - } }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["run_block"]: { + run_block: { parameters: { - "type": "object", - "properties": { - "blockId": { - "type": "string", - "description": "The block ID to run in isolation." 
+ type: 'object', + properties: { + blockId: { + type: 'string', + description: 'The block ID to run in isolation.', }, - "executionId": { - "type": "string", - "description": "Optional execution ID to load the snapshot from. Uses latest execution if omitted." + executionId: { + type: 'string', + description: + 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', }, - "useDeployedState": { - "type": "boolean", - "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', }, - "workflow_input": { - "type": "object", - "description": "JSON object with key-value mappings where each key is an input field name" - } }, - "required": [ - "blockId" - ] + required: ['blockId'], }, resultSchema: undefined, }, - ["run_from_block"]: { + run_from_block: { parameters: { - "type": "object", - "properties": { - "executionId": { - "type": "string", - "description": "Optional execution ID to load the snapshot from. Uses latest execution if omitted." + type: 'object', + properties: { + executionId: { + type: 'string', + description: + 'Optional execution ID to load the snapshot from. Uses latest execution if omitted.', + }, + startBlockId: { + type: 'string', + description: 'The block ID to start execution from.', }, - "startBlockId": { - "type": "string", - "description": "The block ID to start execution from." 
+ useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', }, - "useDeployedState": { - "type": "boolean", - "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', }, - "workflow_input": { - "type": "object", - "description": "JSON object with key-value mappings where each key is an input field name" - } }, - "required": [ - "startBlockId" - ] + required: ['startBlockId'], }, resultSchema: undefined, }, - ["run_workflow"]: { + run_workflow: { parameters: { - "type": "object", - "properties": { - "triggerBlockId": { - "type": "string", - "description": "Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one." + type: 'object', + properties: { + triggerBlockId: { + type: 'string', + description: + 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', }, - "useDeployedState": { - "type": "boolean", - "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. 
If not provided, uses the current workflow in context.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', }, - "workflow_input": { - "type": "object", - "description": "JSON object with key-value mappings where each key is an input field name" - } }, - "required": [ - "workflow_input" - ] + required: ['workflow_input'], }, resultSchema: undefined, }, - ["run_workflow_until_block"]: { + run_workflow_until_block: { parameters: { - "type": "object", - "properties": { - "stopAfterBlockId": { - "type": "string", - "description": "The block ID to stop after. Execution halts once this block completes." + type: 'object', + properties: { + stopAfterBlockId: { + type: 'string', + description: 'The block ID to stop after. Execution halts once this block completes.', + }, + triggerBlockId: { + type: 'string', + description: + 'Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one.', }, - "triggerBlockId": { - "type": "string", - "description": "Optional trigger block ID when the workflow has multiple entrypoints and you need to target a specific one." + useDeployedState: { + type: 'boolean', + description: + 'When true, runs the deployed version instead of the live draft. Default: false (draft).', }, - "useDeployedState": { - "type": "boolean", - "description": "When true, runs the deployed version instead of the live draft. Default: false (draft)." + workflowId: { + type: 'string', + description: + 'Optional workflow ID to run. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to run. If not provided, uses the current workflow in context." 
+ workflow_input: { + type: 'object', + description: 'JSON object with key-value mappings where each key is an input field name', }, - "workflow_input": { - "type": "object", - "description": "JSON object with key-value mappings where each key is an input field name" - } }, - "required": [ - "stopAfterBlockId" - ] + required: ['stopAfterBlockId'], }, resultSchema: undefined, }, - ["scrape_page"]: { + scrape_page: { parameters: { - "type": "object", - "properties": { - "include_links": { - "type": "boolean", - "description": "Extract all links from the page (default false)" + type: 'object', + properties: { + include_links: { + type: 'boolean', + description: 'Extract all links from the page (default false)', + }, + url: { + type: 'string', + description: 'The URL to scrape (must include https://)', }, - "url": { - "type": "string", - "description": "The URL to scrape (must include https://)" + wait_for: { + type: 'string', + description: 'CSS selector to wait for before scraping (for JS-heavy pages)', }, - "wait_for": { - "type": "string", - "description": "CSS selector to wait for before scraping (for JS-heavy pages)" - } }, - "required": [ - "url" - ] + required: ['url'], }, resultSchema: undefined, }, - ["search_documentation"]: { + search_documentation: { parameters: { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "The search query" + type: 'object', + properties: { + query: { + type: 'string', + description: 'The search query', + }, + topK: { + type: 'number', + description: 'Number of results (max 10)', }, - "topK": { - "type": "number", - "description": "Number of results (max 10)" - } }, - "required": [ - "query" - ] + required: ['query'], }, resultSchema: undefined, }, - ["search_library_docs"]: { + search_library_docs: { parameters: { - "type": "object", - "properties": { - "library_name": { - "type": "string", - "description": "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')" + type: 
'object', + properties: { + library_name: { + type: 'string', + description: "Name of the library to search for (e.g., 'nextjs', 'stripe', 'langchain')", + }, + query: { + type: 'string', + description: 'The question or topic to find documentation for - be specific', }, - "query": { - "type": "string", - "description": "The question or topic to find documentation for - be specific" + version: { + type: 'string', + description: "Specific version (optional, e.g., '14', 'v2')", }, - "version": { - "type": "string", - "description": "Specific version (optional, e.g., '14', 'v2')" - } }, - "required": [ - "library_name", - "query" - ] + required: ['library_name', 'query'], }, resultSchema: undefined, }, - ["search_online"]: { + search_online: { parameters: { - "type": "object", - "properties": { - "category": { - "type": "string", - "description": "Filter by category", - "enum": [ - "news", - "tweet", - "github", - "paper", - "company", - "research paper", - "linkedin profile", - "pdf", - "personal site" - ] + type: 'object', + properties: { + category: { + type: 'string', + description: 'Filter by category', + enum: [ + 'news', + 'tweet', + 'github', + 'paper', + 'company', + 'research paper', + 'linkedin profile', + 'pdf', + 'personal site', + ], + }, + include_text: { + type: 'boolean', + description: 'Include page text content (default true)', }, - "include_text": { - "type": "boolean", - "description": "Include page text content (default true)" + num_results: { + type: 'number', + description: 'Number of results (default 10, max 25)', }, - "num_results": { - "type": "number", - "description": "Number of results (default 10, max 25)" + query: { + type: 'string', + description: 'Natural language search query', }, - "query": { - "type": "string", - "description": "Natural language search query" + toolTitle: { + type: 'string', + description: + 'Optional target-only UI phrase for the search row. 
The UI verb is supplied for you, so pass text like "pricing changes" or "Slack webhook docs", not a full sentence like "Searching online for pricing changes".', }, - "toolTitle": { - "type": "string", - "description": "Optional target-only UI phrase for the search row. The UI verb is supplied for you, so pass text like \"pricing changes\" or \"Slack webhook docs\", not a full sentence like \"Searching online for pricing changes\"." - } }, - "required": [ - "query", - "toolTitle" - ] + required: ['query', 'toolTitle'], }, resultSchema: undefined, }, - ["search_patterns"]: { + search_patterns: { parameters: { - "type": "object", - "properties": { - "limit": { - "type": "integer", - "description": "Maximum number of unique pattern examples to return (defaults to 3)." + type: 'object', + properties: { + limit: { + type: 'integer', + description: 'Maximum number of unique pattern examples to return (defaults to 3).', + }, + queries: { + type: 'array', + description: + 'Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.', + items: { + type: 'string', + description: 'Example: "how to automate wealthbox meeting notes into follow-up tasks"', + }, }, - "queries": { - "type": "array", - "description": "Up to 3 descriptive strings explaining the workflow pattern(s) you need. Focus on intent and desired outcomes.", - "items": { - "type": "string", - "description": "Example: \"how to automate wealthbox meeting notes into follow-up tasks\"" - } - } }, - "required": [ - "queries" - ] + required: ['queries'], }, resultSchema: undefined, }, - ["set_block_enabled"]: { + set_block_enabled: { parameters: { - "type": "object", - "properties": { - "blockId": { - "type": "string", - "description": "The block ID whose enabled state should be changed." 
+ type: 'object', + properties: { + blockId: { + type: 'string', + description: 'The block ID whose enabled state should be changed.', + }, + enabled: { + type: 'boolean', + description: 'Set to true to enable the block, or false to disable it.', }, - "enabled": { - "type": "boolean", - "description": "Set to true to enable the block, or false to disable it." + workflowId: { + type: 'string', + description: + 'Optional workflow ID to edit. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID to edit. If not provided, uses the current workflow in context." - } }, - "required": [ - "blockId", - "enabled" - ] + required: ['blockId', 'enabled'], }, - resultSchema: undefined, + resultSchema: undefined, }, - ["set_environment_variables"]: { + set_environment_variables: { parameters: { - "type": "object", - "properties": { - "scope": { - "type": "string", - "description": "Whether to set workspace or personal environment variables. Defaults to workspace.", - "enum": [ - "personal", - "workspace" - ], - "default": "workspace" - }, - "variables": { - "type": "array", - "description": "List of env vars to set", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Variable name" + type: 'object', + properties: { + scope: { + type: 'string', + description: + 'Whether to set workspace or personal environment variables. 
Defaults to workspace.', + enum: ['personal', 'workspace'], + default: 'workspace', + }, + variables: { + type: 'array', + description: 'List of env vars to set', + items: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Variable name', }, - "value": { - "type": "string", - "description": "Variable value" - } - }, - "required": [ - "name", - "value" - ] - } - } - }, - "required": [ - "variables" - ] - }, - resultSchema: undefined, - }, - ["set_global_workflow_variables"]: { - parameters: { - "type": "object", - "properties": { - "operations": { - "type": "array", - "description": "List of operations to apply", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" + value: { + type: 'string', + description: 'Variable value', }, - "operation": { - "type": "string", - "enum": [ - "add", - "delete", - "edit" - ] + }, + required: ['name', 'value'], + }, + }, + }, + required: ['variables'], + }, + resultSchema: undefined, + }, + set_global_workflow_variables: { + parameters: { + type: 'object', + properties: { + operations: { + type: 'array', + description: 'List of operations to apply', + items: { + type: 'object', + properties: { + name: { + type: 'string', }, - "type": { - "type": "string", - "enum": [ - "plain", - "number", - "boolean", - "array", - "object" - ] + operation: { + type: 'string', + enum: ['add', 'delete', 'edit'], + }, + type: { + type: 'string', + enum: ['plain', 'number', 'boolean', 'array', 'object'], + }, + value: { + type: 'string', }, - "value": { - "type": "string" - } }, - "required": [ - "operation", - "name", - "type", - "value" - ] - } + required: ['operation', 'name', 'type', 'value'], + }, + }, + workflowId: { + type: 'string', + description: + 'Optional workflow ID. If not provided, uses the current workflow in context.', }, - "workflowId": { - "type": "string", - "description": "Optional workflow ID. If not provided, uses the current workflow in context." 
- } }, - "required": [ - "operations" - ] + required: ['operations'], }, resultSchema: undefined, }, - ["superagent"]: { + superagent: { parameters: { - "properties": { - "task": { - "description": "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", - "type": "string" - } + properties: { + task: { + description: + "A single sentence — the agent has full conversation context. Do NOT pre-read credentials or look up configs. Example: 'send the email we discussed' or 'check my calendar for tomorrow'.", + type: 'string', + }, }, - "required": [ - "task" - ], - "type": "object" + required: ['task'], + type: 'object', }, resultSchema: undefined, }, - ["table"]: { + table: { parameters: { - "properties": { - "request": { - "description": "What table action is needed.", - "type": "string" - } + properties: { + request: { + description: 'What table action is needed.', + type: 'string', + }, }, - "required": [ - "request" - ], - "type": "object" + required: ['request'], + type: 'object', }, resultSchema: undefined, }, - ["tool_search_tool_regex"]: { + tool_search_tool_regex: { parameters: { - "properties": { - "case_insensitive": { - "description": "Whether the regex should be case-insensitive (default true).", - "type": "boolean" + properties: { + case_insensitive: { + description: 'Whether the regex should be case-insensitive (default true).', + type: 'boolean', }, - "max_results": { - "description": "Maximum number of tools to return (optional).", - "type": "integer" + max_results: { + description: 'Maximum number of tools to return (optional).', + type: 'integer', + }, + pattern: { + description: 'Regular expression to match tool names or descriptions.', + type: 'string', }, - "pattern": { - "description": "Regular expression to match tool names or descriptions.", - "type": "string" - } }, - "required": [ - "pattern" - ], - "type": 
"object" + required: ['pattern'], + type: 'object', }, resultSchema: undefined, }, - ["update_job_history"]: { + update_job_history: { parameters: { - "type": "object", - "properties": { - "jobId": { - "type": "string", - "description": "The job ID." - }, - "summary": { - "type": "string", - "description": "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol')." - } + type: 'object', + properties: { + jobId: { + type: 'string', + description: 'The job ID.', + }, + summary: { + type: 'string', + description: + "A concise summary of what was done this run (e.g., 'Sent follow-up emails to 3 leads: Alice, Bob, Carol').", + }, }, - "required": [ - "jobId", - "summary" - ] + required: ['jobId', 'summary'], }, resultSchema: undefined, }, - ["update_workspace_mcp_server"]: { + update_workspace_mcp_server: { parameters: { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "New description for the server" + type: 'object', + properties: { + description: { + type: 'string', + description: 'New description for the server', + }, + isPublic: { + type: 'boolean', + description: 'Whether the server is publicly accessible', }, - "isPublic": { - "type": "boolean", - "description": "Whether the server is publicly accessible" + name: { + type: 'string', + description: 'New name for the server', }, - "name": { - "type": "string", - "description": "New name for the server" + serverId: { + type: 'string', + description: 'Required: the MCP server ID to update', }, - "serverId": { - "type": "string", - "description": "Required: the MCP server ID to update" - } }, - "required": [ - "serverId" - ] + required: ['serverId'], }, resultSchema: undefined, }, - ["user_memory"]: { + user_memory: { parameters: { - "type": "object", - "properties": { - "confidence": { - "type": "number", - "description": "Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)" + type: 'object', + properties: { + 
confidence: { + type: 'number', + description: 'Confidence level 0-1 (default 1.0 for explicit, 0.8 for inferred)', + }, + correct_value: { + type: 'string', + description: "The correct value to replace the wrong one (for 'correct' operation)", + }, + key: { + type: 'string', + description: "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')", }, - "correct_value": { - "type": "string", - "description": "The correct value to replace the wrong one (for 'correct' operation)" + limit: { + type: 'number', + description: 'Number of results for search (default 10)', }, - "key": { - "type": "string", - "description": "Unique key for the memory (e.g., 'preferred_model', 'slack_credential')" + memory_type: { + type: 'string', + description: "Type of memory: 'preference', 'entity', 'history', or 'correction'", + enum: ['preference', 'entity', 'history', 'correction'], + }, + operation: { + type: 'string', + description: "Operation: 'add', 'search', 'delete', 'correct', or 'list'", + enum: ['add', 'search', 'delete', 'correct', 'list'], + }, + query: { + type: 'string', + description: 'Search query to find relevant memories', + }, + source: { + type: 'string', + description: "Source: 'explicit' (user told you) or 'inferred' (you observed)", + enum: ['explicit', 'inferred'], + }, + value: { + type: 'string', + description: 'Value to remember', + }, + }, + required: ['operation'], + }, + resultSchema: undefined, + }, + user_table: { + parameters: { + type: 'object', + properties: { + args: { + type: 'object', + description: 'Arguments for the operation', + properties: { + column: { + type: 'object', + description: 'Column definition for add_column: { name, type, unique?, position? 
}', + }, + columnName: { + type: 'string', + description: + 'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)', + }, + columnNames: { + type: 'array', + description: + 'Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns.', + }, + data: { + type: 'object', + description: 'Row data as key-value pairs (required for insert_row, update_row)', + }, + description: { + type: 'string', + description: "Table description (optional for 'create')", + }, + fileId: { + type: 'string', + description: + 'Canonical workspace file ID for create_from_file/import_file. Discover via read("files/{name}/meta.json") or glob("files/by-id/*/meta.json").', + }, + filePath: { + type: 'string', + description: + 'Legacy workspace file reference for create_from_file/import_file. Prefer fileId.', + }, + filter: { + type: 'object', + description: + 'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter', + }, + limit: { + type: 'number', + description: 'Maximum rows to return or affect (optional, default 100)', + }, + name: { + type: 'string', + description: "Table name (required for 'create')", + }, + newName: { + type: 'string', + description: 'New column name (required for rename_column)', + }, + newType: { + type: 'string', + description: + 'New column type (optional for update_column). Types: string, number, boolean, date, json', + }, + offset: { + type: 'number', + description: 'Number of rows to skip (optional for query_rows, default 0)', + }, + outputFormat: { + type: 'string', + description: + 'Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. 
Only use this to force a different format than what the extension implies.', + enum: ['json', 'csv', 'txt', 'md', 'html'], + }, + outputPath: { + type: 'string', + description: + 'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like "files/export.csv" — nested paths are not supported.', + }, + rowId: { + type: 'string', + description: 'Row ID (required for get_row, update_row, delete_row)', + }, + rowIds: { + type: 'array', + description: 'Array of row IDs to delete (for batch_delete_rows)', + }, + rows: { + type: 'array', + description: 'Array of row data objects (required for batch_insert_rows)', + }, + schema: { + type: 'object', + description: + "Table schema with columns array (required for 'create'). Each column: { name, type, unique? }", + }, + sort: { + type: 'object', + description: + "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)", + }, + tableId: { + type: 'string', + description: + "Table ID (required for most operations except 'create' and batch 'delete')", + }, + tableIds: { + type: 'array', + description: 'Array of table IDs (for batch delete)', + items: { + type: 'string', + }, + }, + unique: { + type: 'boolean', + description: 'Set column unique constraint (optional for update_column)', + }, + updates: { + type: 'array', + description: + 'Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)', + }, + values: { + type: 'object', + description: + 'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)', + }, + }, }, - "limit": { - "type": "number", - "description": "Number of results for search (default 10)" + operation: { + type: 'string', + description: 'The operation to perform', + enum: [ + 'create', + 'create_from_file', + 'import_file', + 'get', + 'get_schema', + 
'delete', + 'insert_row', + 'batch_insert_rows', + 'get_row', + 'query_rows', + 'update_row', + 'delete_row', + 'update_rows_by_filter', + 'delete_rows_by_filter', + 'batch_update_rows', + 'batch_delete_rows', + 'add_column', + 'rename_column', + 'delete_column', + 'update_column', + ], }, - "memory_type": { - "type": "string", - "description": "Type of memory: 'preference', 'entity', 'history', or 'correction'", - "enum": [ - "preference", - "entity", - "history", - "correction" - ] - }, - "operation": { - "type": "string", - "description": "Operation: 'add', 'search', 'delete', 'correct', or 'list'", - "enum": [ - "add", - "search", - "delete", - "correct", - "list" - ] - }, - "query": { - "type": "string", - "description": "Search query to find relevant memories" - }, - "source": { - "type": "string", - "description": "Source: 'explicit' (user told you) or 'inferred' (you observed)", - "enum": [ - "explicit", - "inferred" - ] - }, - "value": { - "type": "string", - "description": "Value to remember" - } - }, - "required": [ - "operation" - ] - }, - resultSchema: undefined, - }, - ["user_table"]: { - parameters: { - "type": "object", - "properties": { - "args": { - "type": "object", - "description": "Arguments for the operation", - "properties": { - "column": { - "type": "object", - "description": "Column definition for add_column: { name, type, unique?, position? }" - }, - "columnName": { - "type": "string", - "description": "Column name (required for rename_column, update_column; use columnNames array for batch delete_column)" - }, - "columnNames": { - "type": "array", - "description": "Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns." 
- }, - "data": { - "type": "object", - "description": "Row data as key-value pairs (required for insert_row, update_row)" - }, - "description": { - "type": "string", - "description": "Table description (optional for 'create')" - }, - "fileId": { - "type": "string", - "description": "Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")." - }, - "filePath": { - "type": "string", - "description": "Legacy workspace file reference for create_from_file/import_file. Prefer fileId." - }, - "filter": { - "type": "object", - "description": "MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter" - }, - "limit": { - "type": "number", - "description": "Maximum rows to return or affect (optional, default 100)" - }, - "name": { - "type": "string", - "description": "Table name (required for 'create')" - }, - "newName": { - "type": "string", - "description": "New column name (required for rename_column)" - }, - "newType": { - "type": "string", - "description": "New column type (optional for update_column). Types: string, number, boolean, date, json" - }, - "offset": { - "type": "number", - "description": "Number of rows to skip (optional for query_rows, default 0)" - }, - "outputFormat": { - "type": "string", - "description": "Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. Only use this to force a different format than what the extension implies.", - "enum": [ - "json", - "csv", - "txt", - "md", - "html" - ] - }, - "outputPath": { - "type": "string", - "description": "Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported." 
- }, - "rowId": { - "type": "string", - "description": "Row ID (required for get_row, update_row, delete_row)" - }, - "rowIds": { - "type": "array", - "description": "Array of row IDs to delete (for batch_delete_rows)" - }, - "rows": { - "type": "array", - "description": "Array of row data objects (required for batch_insert_rows)" - }, - "schema": { - "type": "object", - "description": "Table schema with columns array (required for 'create'). Each column: { name, type, unique? }" - }, - "sort": { - "type": "object", - "description": "Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)" - }, - "tableId": { - "type": "string", - "description": "Table ID (required for most operations except 'create' and batch 'delete')" - }, - "tableIds": { - "type": "array", - "description": "Array of table IDs (for batch delete)", - "items": { - "type": "string" - } - }, - "unique": { - "type": "boolean", - "description": "Set column unique constraint (optional for update_column)" - }, - "updates": { - "type": "array", - "description": "Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)" - }, - "values": { - "type": "object", - "description": "Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)" - } - } - }, - "operation": { - "type": "string", - "description": "The operation to perform", - "enum": [ - "create", - "create_from_file", - "import_file", - "get", - "get_schema", - "delete", - "insert_row", - "batch_insert_rows", - "get_row", - "query_rows", - "update_row", - "delete_row", - "update_rows_by_filter", - "delete_rows_by_filter", - "batch_update_rows", - "batch_delete_rows", - "add_column", - "rename_column", - "delete_column", - "update_column" - ] - } - }, - "required": [ - "operation", - "args" - ] + }, + required: ['operation', 'args'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - 
"description": "Operation-specific result payload." - }, - "message": { - "type": "string", - "description": "Human-readable outcome summary." - }, - "success": { - "type": "boolean", - "description": "Whether the operation succeeded." - } - }, - "required": [ - "success", - "message" - ] - }, - }, - ["workflow"]: { - parameters: { - "type": "object" - }, - resultSchema: undefined, - }, - ["workspace_file"]: { - parameters: { - "type": "object", - "properties": { - "operation": { - "type": "string", - "description": "The file operation to perform.", - "enum": [ - "append", - "update", - "patch" - ] - }, - "target": { - "type": "object", - "description": "Explicit file target. Use kind=file_id + fileId for existing files.", - "properties": { - "fileId": { - "type": "string", - "description": "Canonical existing workspace file ID. Required when target.kind=file_id." - }, - "fileName": { - "type": "string", - "description": "Plain workspace filename including extension, e.g. \"main.py\" or \"report.docx\". Required when target.kind=new_file." - }, - "kind": { - "type": "string", - "description": "How the file target is identified.", - "enum": [ - "new_file", - "file_id" - ] - } + type: 'object', + properties: { + data: { + type: 'object', + description: 'Operation-specific result payload.', + }, + message: { + type: 'string', + description: 'Human-readable outcome summary.', + }, + success: { + type: 'boolean', + description: 'Whether the operation succeeded.', + }, + }, + required: ['success', 'message'], + }, + }, + workflow: { + parameters: { + type: 'object', + }, + resultSchema: undefined, + }, + workspace_file: { + parameters: { + type: 'object', + properties: { + operation: { + type: 'string', + description: 'The file operation to perform.', + enum: ['append', 'update', 'patch'], + }, + target: { + type: 'object', + description: 'Explicit file target. 
Use kind=file_id + fileId for existing files.', + properties: { + fileId: { + type: 'string', + description: + 'Canonical existing workspace file ID. Required when target.kind=file_id.', + }, + fileName: { + type: 'string', + description: + 'Plain workspace filename including extension, e.g. "main.py" or "report.docx". Required when target.kind=new_file.', + }, + kind: { + type: 'string', + description: 'How the file target is identified.', + enum: ['new_file', 'file_id'], + }, + }, + required: ['kind'], + }, + title: { + type: 'string', + description: + 'Required short UI label for this content unit, e.g. "Chapter 1", "Slide 3", or "Fix footer spacing".', + }, + contentType: { + type: 'string', + description: + 'Optional MIME type override. Usually omit and let the system infer from the target file extension.', + enum: [ + 'text/markdown', + 'text/html', + 'text/plain', + 'application/json', + 'text/csv', + 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + 'application/pdf', + ], + }, + edit: { + type: 'object', + description: + 'Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.', + properties: { + after_anchor: { + type: 'string', + description: + 'Boundary line kept after inserted replacement content. Required for mode=replace_between.', + }, + anchor: { + type: 'string', + description: + 'Anchor line after which new content is inserted. Required for mode=insert_after.', + }, + before_anchor: { + type: 'string', + description: + 'Boundary line kept before inserted replacement content. Required for mode=replace_between.', + }, + end_anchor: { + type: 'string', + description: 'First line to keep after deletion. 
Required for mode=delete_between.', + }, + mode: { + type: 'string', + description: 'Anchored edit mode when strategy=anchored.', + enum: ['replace_between', 'insert_after', 'delete_between'], + }, + occurrence: { + type: 'number', + description: '1-based occurrence for repeated anchor lines. Optional; defaults to 1.', + }, + replaceAll: { + type: 'boolean', + description: + 'When true and strategy=search_replace, replace every match instead of requiring a unique single match.', + }, + search: { + type: 'string', + description: + 'Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true.', + }, + start_anchor: { + type: 'string', + description: 'First line to delete. Required for mode=delete_between.', + }, + strategy: { + type: 'string', + description: 'Patch strategy.', + enum: ['search_replace', 'anchored'], + }, }, - "required": [ - "kind" - ] - }, - "title": { - "type": "string", - "description": "Required short UI label for this content unit, e.g. \"Chapter 1\", \"Slide 3\", or \"Fix footer spacing\"." - }, - "contentType": { - "type": "string", - "description": "Optional MIME type override. Usually omit and let the system infer from the target file extension.", - "enum": [ - "text/markdown", - "text/html", - "text/plain", - "application/json", - "text/csv", - "application/vnd.openxmlformats-officedocument.presentationml.presentation", - "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "application/pdf" - ] - }, - "edit": { - "type": "object", - "description": "Patch metadata. Use strategy=search_replace for exact text replacement, or strategy=anchored for line-based inserts/replacements/deletions. The actual replacement/insert content is provided via the paired edit_content tool call.", - "properties": { - "after_anchor": { - "type": "string", - "description": "Boundary line kept after inserted replacement content. Required for mode=replace_between." 
- }, - "anchor": { - "type": "string", - "description": "Anchor line after which new content is inserted. Required for mode=insert_after." - }, - "before_anchor": { - "type": "string", - "description": "Boundary line kept before inserted replacement content. Required for mode=replace_between." - }, - "end_anchor": { - "type": "string", - "description": "First line to keep after deletion. Required for mode=delete_between." - }, - "mode": { - "type": "string", - "description": "Anchored edit mode when strategy=anchored.", - "enum": [ - "replace_between", - "insert_after", - "delete_between" - ] - }, - "occurrence": { - "type": "number", - "description": "1-based occurrence for repeated anchor lines. Optional; defaults to 1." - }, - "replaceAll": { - "type": "boolean", - "description": "When true and strategy=search_replace, replace every match instead of requiring a unique single match." - }, - "search": { - "type": "string", - "description": "Exact text to find when strategy=search_replace. Must match exactly once unless replaceAll=true." - }, - "start_anchor": { - "type": "string", - "description": "First line to delete. Required for mode=delete_between." - }, - "strategy": { - "type": "string", - "description": "Patch strategy.", - "enum": [ - "search_replace", - "anchored" - ] - } - } - }, - "newName": { - "type": "string", - "description": "New file name for rename. Must be a plain workspace filename like \"main.py\"." - } - }, - "required": [ - "operation", - "target", - "title" - ] + }, + newName: { + type: 'string', + description: + 'New file name for rename. Must be a plain workspace filename like "main.py".', + }, + }, + required: ['operation', 'target', 'title'], }, resultSchema: { - "type": "object", - "properties": { - "data": { - "type": "object", - "description": "Optional operation metadata such as file id, file name, size, and content type." - }, - "message": { - "type": "string", - "description": "Human-readable summary of the outcome." 
- }, - "success": { - "type": "boolean", - "description": "Whether the file operation succeeded." - } - }, - "required": [ - "success", - "message" - ] + type: 'object', + properties: { + data: { + type: 'object', + description: + 'Optional operation metadata such as file id, file name, size, and content type.', + }, + message: { + type: 'string', + description: 'Human-readable summary of the outcome.', + }, + success: { + type: 'boolean', + description: 'Whether the file operation succeeded.', + }, + }, + required: ['success', 'message'], }, }, } diff --git a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts index 29441128eba..2040cbc8636 100644 --- a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts @@ -10,342 +10,349 @@ // remains the single source of truth. export const AbortBackend = { - InProcess: "in_process", - Redis: "redis", -} as const; + InProcess: 'in_process', + Redis: 'redis', +} as const -export type AbortBackendKey = keyof typeof AbortBackend; -export type AbortBackendValue = (typeof AbortBackend)[AbortBackendKey]; +export type AbortBackendKey = keyof typeof AbortBackend +export type AbortBackendValue = (typeof AbortBackend)[AbortBackendKey] export const AbortRedisResult = { - Error: "error", - Ok: "ok", - Slow: "slow", -} as const; + Error: 'error', + Ok: 'ok', + Slow: 'slow', +} as const -export type AbortRedisResultKey = keyof typeof AbortRedisResult; -export type AbortRedisResultValue = (typeof AbortRedisResult)[AbortRedisResultKey]; +export type AbortRedisResultKey = keyof typeof AbortRedisResult +export type AbortRedisResultValue = (typeof AbortRedisResult)[AbortRedisResultKey] export const AuthKeyMatch = { - Enterprise: "enterprise", - None: "none", - User: "user", -} as const; + Enterprise: 'enterprise', + None: 'none', + User: 'user', +} as const -export type AuthKeyMatchKey = keyof typeof 
AuthKeyMatch; -export type AuthKeyMatchValue = (typeof AuthKeyMatch)[AuthKeyMatchKey]; +export type AuthKeyMatchKey = keyof typeof AuthKeyMatch +export type AuthKeyMatchValue = (typeof AuthKeyMatch)[AuthKeyMatchKey] export const BillingAnalyticsOutcome = { - Duplicate: "duplicate", - RetriesExhausted: "retries_exhausted", - Success: "success", - Unknown: "unknown", -} as const; + Duplicate: 'duplicate', + RetriesExhausted: 'retries_exhausted', + Success: 'success', + Unknown: 'unknown', +} as const -export type BillingAnalyticsOutcomeKey = keyof typeof BillingAnalyticsOutcome; -export type BillingAnalyticsOutcomeValue = (typeof BillingAnalyticsOutcome)[BillingAnalyticsOutcomeKey]; +export type BillingAnalyticsOutcomeKey = keyof typeof BillingAnalyticsOutcome +export type BillingAnalyticsOutcomeValue = + (typeof BillingAnalyticsOutcome)[BillingAnalyticsOutcomeKey] export const BillingFlushOutcome = { - CheckpointAlreadyClaimed: "checkpoint_already_claimed", - CheckpointLoadFailed: "checkpoint_load_failed", - Flushed: "flushed", - NoCheckpoint: "no_checkpoint", - NoSnapshot: "no_snapshot", - SkippedUnconfigured: "skipped_unconfigured", -} as const; + CheckpointAlreadyClaimed: 'checkpoint_already_claimed', + CheckpointLoadFailed: 'checkpoint_load_failed', + Flushed: 'flushed', + NoCheckpoint: 'no_checkpoint', + NoSnapshot: 'no_snapshot', + SkippedUnconfigured: 'skipped_unconfigured', +} as const -export type BillingFlushOutcomeKey = keyof typeof BillingFlushOutcome; -export type BillingFlushOutcomeValue = (typeof BillingFlushOutcome)[BillingFlushOutcomeKey]; +export type BillingFlushOutcomeKey = keyof typeof BillingFlushOutcome +export type BillingFlushOutcomeValue = (typeof BillingFlushOutcome)[BillingFlushOutcomeKey] export const BillingRouteOutcome = { - AuthFailed: "auth_failed", - Billed: "billed", - BillingDisabled: "billing_disabled", - DuplicateIdempotencyKey: "duplicate_idempotency_key", - InternalError: "internal_error", - InvalidBody: "invalid_body", -} as 
const; + AuthFailed: 'auth_failed', + Billed: 'billed', + BillingDisabled: 'billing_disabled', + DuplicateIdempotencyKey: 'duplicate_idempotency_key', + InternalError: 'internal_error', + InvalidBody: 'invalid_body', +} as const -export type BillingRouteOutcomeKey = keyof typeof BillingRouteOutcome; -export type BillingRouteOutcomeValue = (typeof BillingRouteOutcome)[BillingRouteOutcomeKey]; +export type BillingRouteOutcomeKey = keyof typeof BillingRouteOutcome +export type BillingRouteOutcomeValue = (typeof BillingRouteOutcome)[BillingRouteOutcomeKey] export const CopilotAbortOutcome = { - BadRequest: "bad_request", - FallbackPersistFailed: "fallback_persist_failed", - MissingMessageId: "missing_message_id", - MissingStreamId: "missing_stream_id", - NoChatId: "no_chat_id", - Ok: "ok", - SettleTimeout: "settle_timeout", - Settled: "settled", - Unauthorized: "unauthorized", -} as const; - -export type CopilotAbortOutcomeKey = keyof typeof CopilotAbortOutcome; -export type CopilotAbortOutcomeValue = (typeof CopilotAbortOutcome)[CopilotAbortOutcomeKey]; + BadRequest: 'bad_request', + FallbackPersistFailed: 'fallback_persist_failed', + MissingMessageId: 'missing_message_id', + MissingStreamId: 'missing_stream_id', + NoChatId: 'no_chat_id', + Ok: 'ok', + SettleTimeout: 'settle_timeout', + Settled: 'settled', + Unauthorized: 'unauthorized', +} as const + +export type CopilotAbortOutcomeKey = keyof typeof CopilotAbortOutcome +export type CopilotAbortOutcomeValue = (typeof CopilotAbortOutcome)[CopilotAbortOutcomeKey] export const CopilotBranchKind = { - Workflow: "workflow", - Workspace: "workspace", -} as const; + Workflow: 'workflow', + Workspace: 'workspace', +} as const -export type CopilotBranchKindKey = keyof typeof CopilotBranchKind; -export type CopilotBranchKindValue = (typeof CopilotBranchKind)[CopilotBranchKindKey]; +export type CopilotBranchKindKey = keyof typeof CopilotBranchKind +export type CopilotBranchKindValue = (typeof 
CopilotBranchKind)[CopilotBranchKindKey] export const CopilotChatFinalizeOutcome = { - AppendedAssistant: "appended_assistant", - AssistantAlreadyPersisted: "assistant_already_persisted", - ClearedStreamMarkerOnly: "cleared_stream_marker_only", - StaleUserMessage: "stale_user_message", -} as const; + AppendedAssistant: 'appended_assistant', + AssistantAlreadyPersisted: 'assistant_already_persisted', + ClearedStreamMarkerOnly: 'cleared_stream_marker_only', + StaleUserMessage: 'stale_user_message', +} as const -export type CopilotChatFinalizeOutcomeKey = keyof typeof CopilotChatFinalizeOutcome; -export type CopilotChatFinalizeOutcomeValue = (typeof CopilotChatFinalizeOutcome)[CopilotChatFinalizeOutcomeKey]; +export type CopilotChatFinalizeOutcomeKey = keyof typeof CopilotChatFinalizeOutcome +export type CopilotChatFinalizeOutcomeValue = + (typeof CopilotChatFinalizeOutcome)[CopilotChatFinalizeOutcomeKey] export const CopilotChatPersistOutcome = { - Appended: "appended", - ChatNotFound: "chat_not_found", -} as const; + Appended: 'appended', + ChatNotFound: 'chat_not_found', +} as const -export type CopilotChatPersistOutcomeKey = keyof typeof CopilotChatPersistOutcome; -export type CopilotChatPersistOutcomeValue = (typeof CopilotChatPersistOutcome)[CopilotChatPersistOutcomeKey]; +export type CopilotChatPersistOutcomeKey = keyof typeof CopilotChatPersistOutcome +export type CopilotChatPersistOutcomeValue = + (typeof CopilotChatPersistOutcome)[CopilotChatPersistOutcomeKey] export const CopilotConfirmOutcome = { - Delivered: "delivered", - Forbidden: "forbidden", - InternalError: "internal_error", - RunNotFound: "run_not_found", - ToolCallNotFound: "tool_call_not_found", - Unauthorized: "unauthorized", - UpdateFailed: "update_failed", - ValidationError: "validation_error", -} as const; - -export type CopilotConfirmOutcomeKey = keyof typeof CopilotConfirmOutcome; -export type CopilotConfirmOutcomeValue = (typeof CopilotConfirmOutcome)[CopilotConfirmOutcomeKey]; + 
Delivered: 'delivered', + Forbidden: 'forbidden', + InternalError: 'internal_error', + RunNotFound: 'run_not_found', + ToolCallNotFound: 'tool_call_not_found', + Unauthorized: 'unauthorized', + UpdateFailed: 'update_failed', + ValidationError: 'validation_error', +} as const + +export type CopilotConfirmOutcomeKey = keyof typeof CopilotConfirmOutcome +export type CopilotConfirmOutcomeValue = (typeof CopilotConfirmOutcome)[CopilotConfirmOutcomeKey] export const CopilotFinalizeOutcome = { - Aborted: "aborted", - Error: "error", - Success: "success", -} as const; + Aborted: 'aborted', + Error: 'error', + Success: 'success', +} as const -export type CopilotFinalizeOutcomeKey = keyof typeof CopilotFinalizeOutcome; -export type CopilotFinalizeOutcomeValue = (typeof CopilotFinalizeOutcome)[CopilotFinalizeOutcomeKey]; +export type CopilotFinalizeOutcomeKey = keyof typeof CopilotFinalizeOutcome +export type CopilotFinalizeOutcomeValue = (typeof CopilotFinalizeOutcome)[CopilotFinalizeOutcomeKey] export const CopilotLeg = { - SimToGo: "sim_to_go", -} as const; + SimToGo: 'sim_to_go', +} as const -export type CopilotLegKey = keyof typeof CopilotLeg; -export type CopilotLegValue = (typeof CopilotLeg)[CopilotLegKey]; +export type CopilotLegKey = keyof typeof CopilotLeg +export type CopilotLegValue = (typeof CopilotLeg)[CopilotLegKey] export const CopilotOutputFileOutcome = { - Failed: "failed", - Uploaded: "uploaded", -} as const; + Failed: 'failed', + Uploaded: 'uploaded', +} as const -export type CopilotOutputFileOutcomeKey = keyof typeof CopilotOutputFileOutcome; -export type CopilotOutputFileOutcomeValue = (typeof CopilotOutputFileOutcome)[CopilotOutputFileOutcomeKey]; +export type CopilotOutputFileOutcomeKey = keyof typeof CopilotOutputFileOutcome +export type CopilotOutputFileOutcomeValue = + (typeof CopilotOutputFileOutcome)[CopilotOutputFileOutcomeKey] export const CopilotRecoveryOutcome = { - GapDetected: "gap_detected", - InRange: "in_range", -} as const; + 
GapDetected: 'gap_detected', + InRange: 'in_range', +} as const -export type CopilotRecoveryOutcomeKey = keyof typeof CopilotRecoveryOutcome; -export type CopilotRecoveryOutcomeValue = (typeof CopilotRecoveryOutcome)[CopilotRecoveryOutcomeKey]; +export type CopilotRecoveryOutcomeKey = keyof typeof CopilotRecoveryOutcome +export type CopilotRecoveryOutcomeValue = (typeof CopilotRecoveryOutcome)[CopilotRecoveryOutcomeKey] export const CopilotRequestCancelReason = { - ClientDisconnect: "client_disconnect", - ExplicitStop: "explicit_stop", - Timeout: "timeout", - Unknown: "unknown", -} as const; + ClientDisconnect: 'client_disconnect', + ExplicitStop: 'explicit_stop', + Timeout: 'timeout', + Unknown: 'unknown', +} as const -export type CopilotRequestCancelReasonKey = keyof typeof CopilotRequestCancelReason; -export type CopilotRequestCancelReasonValue = (typeof CopilotRequestCancelReason)[CopilotRequestCancelReasonKey]; +export type CopilotRequestCancelReasonKey = keyof typeof CopilotRequestCancelReason +export type CopilotRequestCancelReasonValue = + (typeof CopilotRequestCancelReason)[CopilotRequestCancelReasonKey] export const CopilotResourcesOp = { - Delete: "delete", - None: "none", - Upsert: "upsert", -} as const; + Delete: 'delete', + None: 'none', + Upsert: 'upsert', +} as const -export type CopilotResourcesOpKey = keyof typeof CopilotResourcesOp; -export type CopilotResourcesOpValue = (typeof CopilotResourcesOp)[CopilotResourcesOpKey]; +export type CopilotResourcesOpKey = keyof typeof CopilotResourcesOp +export type CopilotResourcesOpValue = (typeof CopilotResourcesOp)[CopilotResourcesOpKey] export const CopilotResumeOutcome = { - BatchDelivered: "batch_delivered", - ClientDisconnected: "client_disconnected", - EndedWithoutTerminal: "ended_without_terminal", - StreamNotFound: "stream_not_found", - TerminalDelivered: "terminal_delivered", -} as const; + BatchDelivered: 'batch_delivered', + ClientDisconnected: 'client_disconnected', + EndedWithoutTerminal: 
'ended_without_terminal', + StreamNotFound: 'stream_not_found', + TerminalDelivered: 'terminal_delivered', +} as const -export type CopilotResumeOutcomeKey = keyof typeof CopilotResumeOutcome; -export type CopilotResumeOutcomeValue = (typeof CopilotResumeOutcome)[CopilotResumeOutcomeKey]; +export type CopilotResumeOutcomeKey = keyof typeof CopilotResumeOutcome +export type CopilotResumeOutcomeValue = (typeof CopilotResumeOutcome)[CopilotResumeOutcomeKey] export const CopilotStopOutcome = { - ChatNotFound: "chat_not_found", - InternalError: "internal_error", - NoMatchingRow: "no_matching_row", - Persisted: "persisted", - Unauthorized: "unauthorized", - ValidationError: "validation_error", -} as const; + ChatNotFound: 'chat_not_found', + InternalError: 'internal_error', + NoMatchingRow: 'no_matching_row', + Persisted: 'persisted', + Unauthorized: 'unauthorized', + ValidationError: 'validation_error', +} as const -export type CopilotStopOutcomeKey = keyof typeof CopilotStopOutcome; -export type CopilotStopOutcomeValue = (typeof CopilotStopOutcome)[CopilotStopOutcomeKey]; +export type CopilotStopOutcomeKey = keyof typeof CopilotStopOutcome +export type CopilotStopOutcomeValue = (typeof CopilotStopOutcome)[CopilotStopOutcomeKey] export const CopilotSurface = { - Copilot: "copilot", - Mothership: "mothership", -} as const; + Copilot: 'copilot', + Mothership: 'mothership', +} as const -export type CopilotSurfaceKey = keyof typeof CopilotSurface; -export type CopilotSurfaceValue = (typeof CopilotSurface)[CopilotSurfaceKey]; +export type CopilotSurfaceKey = keyof typeof CopilotSurface +export type CopilotSurfaceValue = (typeof CopilotSurface)[CopilotSurfaceKey] export const CopilotTableOutcome = { - EmptyContent: "empty_content", - EmptyRows: "empty_rows", - Failed: "failed", - Imported: "imported", - InvalidJsonShape: "invalid_json_shape", - InvalidShape: "invalid_shape", - RowLimitExceeded: "row_limit_exceeded", - TableNotFound: "table_not_found", - Wrote: "wrote", -} as 
const; - -export type CopilotTableOutcomeKey = keyof typeof CopilotTableOutcome; -export type CopilotTableOutcomeValue = (typeof CopilotTableOutcome)[CopilotTableOutcomeKey]; + EmptyContent: 'empty_content', + EmptyRows: 'empty_rows', + Failed: 'failed', + Imported: 'imported', + InvalidJsonShape: 'invalid_json_shape', + InvalidShape: 'invalid_shape', + RowLimitExceeded: 'row_limit_exceeded', + TableNotFound: 'table_not_found', + Wrote: 'wrote', +} as const + +export type CopilotTableOutcomeKey = keyof typeof CopilotTableOutcome +export type CopilotTableOutcomeValue = (typeof CopilotTableOutcome)[CopilotTableOutcomeKey] export const CopilotTableSourceFormat = { - Csv: "csv", - Json: "json", -} as const; + Csv: 'csv', + Json: 'json', +} as const -export type CopilotTableSourceFormatKey = keyof typeof CopilotTableSourceFormat; -export type CopilotTableSourceFormatValue = (typeof CopilotTableSourceFormat)[CopilotTableSourceFormatKey]; +export type CopilotTableSourceFormatKey = keyof typeof CopilotTableSourceFormat +export type CopilotTableSourceFormatValue = + (typeof CopilotTableSourceFormat)[CopilotTableSourceFormatKey] export const CopilotTransport = { - Batch: "batch", - Headless: "headless", - Stream: "stream", -} as const; + Batch: 'batch', + Headless: 'headless', + Stream: 'stream', +} as const -export type CopilotTransportKey = keyof typeof CopilotTransport; -export type CopilotTransportValue = (typeof CopilotTransport)[CopilotTransportKey]; +export type CopilotTransportKey = keyof typeof CopilotTransport +export type CopilotTransportValue = (typeof CopilotTransport)[CopilotTransportKey] export const CopilotValidateOutcome = { - InternalAuthFailed: "internal_auth_failed", - InternalError: "internal_error", - InvalidBody: "invalid_body", - Ok: "ok", - UsageExceeded: "usage_exceeded", - UserNotFound: "user_not_found", -} as const; + InternalAuthFailed: 'internal_auth_failed', + InternalError: 'internal_error', + InvalidBody: 'invalid_body', + Ok: 'ok', + 
UsageExceeded: 'usage_exceeded', + UserNotFound: 'user_not_found', +} as const -export type CopilotValidateOutcomeKey = keyof typeof CopilotValidateOutcome; -export type CopilotValidateOutcomeValue = (typeof CopilotValidateOutcome)[CopilotValidateOutcomeKey]; +export type CopilotValidateOutcomeKey = keyof typeof CopilotValidateOutcome +export type CopilotValidateOutcomeValue = (typeof CopilotValidateOutcome)[CopilotValidateOutcomeKey] export const CopilotVfsOutcome = { - PassthroughFitsBudget: "passthrough_fits_budget", - PassthroughNoMetadata: "passthrough_no_metadata", - PassthroughNoSharp: "passthrough_no_sharp", - RejectedNoMetadata: "rejected_no_metadata", - RejectedNoSharp: "rejected_no_sharp", - RejectedTooLargeAfterResize: "rejected_too_large_after_resize", - Resized: "resized", -} as const; - -export type CopilotVfsOutcomeKey = keyof typeof CopilotVfsOutcome; -export type CopilotVfsOutcomeValue = (typeof CopilotVfsOutcome)[CopilotVfsOutcomeKey]; + PassthroughFitsBudget: 'passthrough_fits_budget', + PassthroughNoMetadata: 'passthrough_no_metadata', + PassthroughNoSharp: 'passthrough_no_sharp', + RejectedNoMetadata: 'rejected_no_metadata', + RejectedNoSharp: 'rejected_no_sharp', + RejectedTooLargeAfterResize: 'rejected_too_large_after_resize', + Resized: 'resized', +} as const + +export type CopilotVfsOutcomeKey = keyof typeof CopilotVfsOutcome +export type CopilotVfsOutcomeValue = (typeof CopilotVfsOutcome)[CopilotVfsOutcomeKey] export const CopilotVfsReadOutcome = { - BinaryPlaceholder: "binary_placeholder", - DocumentParsed: "document_parsed", - ImagePrepared: "image_prepared", - ImageTooLarge: "image_too_large", - ParseFailed: "parse_failed", - ReadFailed: "read_failed", - TextRead: "text_read", - TextTooLarge: "text_too_large", -} as const; - -export type CopilotVfsReadOutcomeKey = keyof typeof CopilotVfsReadOutcome; -export type CopilotVfsReadOutcomeValue = (typeof CopilotVfsReadOutcome)[CopilotVfsReadOutcomeKey]; + BinaryPlaceholder: 
'binary_placeholder', + DocumentParsed: 'document_parsed', + ImagePrepared: 'image_prepared', + ImageTooLarge: 'image_too_large', + ParseFailed: 'parse_failed', + ReadFailed: 'read_failed', + TextRead: 'text_read', + TextTooLarge: 'text_too_large', +} as const + +export type CopilotVfsReadOutcomeKey = keyof typeof CopilotVfsReadOutcome +export type CopilotVfsReadOutcomeValue = (typeof CopilotVfsReadOutcome)[CopilotVfsReadOutcomeKey] export const CopilotVfsReadPath = { - Binary: "binary", - Image: "image", - ParseableDocument: "parseable_document", - Text: "text", -} as const; + Binary: 'binary', + Image: 'image', + ParseableDocument: 'parseable_document', + Text: 'text', +} as const -export type CopilotVfsReadPathKey = keyof typeof CopilotVfsReadPath; -export type CopilotVfsReadPathValue = (typeof CopilotVfsReadPath)[CopilotVfsReadPathKey]; +export type CopilotVfsReadPathKey = keyof typeof CopilotVfsReadPath +export type CopilotVfsReadPathValue = (typeof CopilotVfsReadPath)[CopilotVfsReadPathKey] export const LlmErrorStage = { - BuildRequest: "build_request", - Decode: "decode", - HttpBuild: "http_build", - HttpStatus: "http_status", - Invoke: "invoke", - MarshalRequest: "marshal_request", - StreamClose: "stream_close", -} as const; - -export type LlmErrorStageKey = keyof typeof LlmErrorStage; -export type LlmErrorStageValue = (typeof LlmErrorStage)[LlmErrorStageKey]; + BuildRequest: 'build_request', + Decode: 'decode', + HttpBuild: 'http_build', + HttpStatus: 'http_status', + Invoke: 'invoke', + MarshalRequest: 'marshal_request', + StreamClose: 'stream_close', +} as const + +export type LlmErrorStageKey = keyof typeof LlmErrorStage +export type LlmErrorStageValue = (typeof LlmErrorStage)[LlmErrorStageKey] export const RateLimitOutcome = { - Allowed: "allowed", - IncrError: "incr_error", - Limited: "limited", -} as const; + Allowed: 'allowed', + IncrError: 'incr_error', + Limited: 'limited', +} as const -export type RateLimitOutcomeKey = keyof typeof 
RateLimitOutcome; -export type RateLimitOutcomeValue = (typeof RateLimitOutcome)[RateLimitOutcomeKey]; +export type RateLimitOutcomeKey = keyof typeof RateLimitOutcome +export type RateLimitOutcomeValue = (typeof RateLimitOutcome)[RateLimitOutcomeKey] export const ToolAsyncWaiterResolution = { - ContextCancelled: "context_cancelled", - Poll: "poll", - Pubsub: "pubsub", - StoredAfterClose: "stored_after_close", - StoredBeforeSubscribe: "stored_before_subscribe", - StoredPostSubscribe: "stored_post_subscribe", - SubscriptionClosed: "subscription_closed", - Unknown: "unknown", -} as const; - -export type ToolAsyncWaiterResolutionKey = keyof typeof ToolAsyncWaiterResolution; -export type ToolAsyncWaiterResolutionValue = (typeof ToolAsyncWaiterResolution)[ToolAsyncWaiterResolutionKey]; + ContextCancelled: 'context_cancelled', + Poll: 'poll', + Pubsub: 'pubsub', + StoredAfterClose: 'stored_after_close', + StoredBeforeSubscribe: 'stored_before_subscribe', + StoredPostSubscribe: 'stored_post_subscribe', + SubscriptionClosed: 'subscription_closed', + Unknown: 'unknown', +} as const + +export type ToolAsyncWaiterResolutionKey = keyof typeof ToolAsyncWaiterResolution +export type ToolAsyncWaiterResolutionValue = + (typeof ToolAsyncWaiterResolution)[ToolAsyncWaiterResolutionKey] export const ToolErrorKind = { - Dispatch: "dispatch", - NotFound: "not_found", -} as const; + Dispatch: 'dispatch', + NotFound: 'not_found', +} as const -export type ToolErrorKindKey = keyof typeof ToolErrorKind; -export type ToolErrorKindValue = (typeof ToolErrorKind)[ToolErrorKindKey]; +export type ToolErrorKindKey = keyof typeof ToolErrorKind +export type ToolErrorKindValue = (typeof ToolErrorKind)[ToolErrorKindKey] export const ToolExecutor = { - Client: "client", - Go: "go", - Sim: "sim", -} as const; + Client: 'client', + Go: 'go', + Sim: 'sim', +} as const -export type ToolExecutorKey = keyof typeof ToolExecutor; -export type ToolExecutorValue = (typeof ToolExecutor)[ToolExecutorKey]; +export 
type ToolExecutorKey = keyof typeof ToolExecutor +export type ToolExecutorValue = (typeof ToolExecutor)[ToolExecutorKey] export const ToolStoreStatus = { - Cancelled: "cancelled", - Completed: "completed", - Failed: "failed", - Pending: "pending", -} as const; - -export type ToolStoreStatusKey = keyof typeof ToolStoreStatus; -export type ToolStoreStatusValue = (typeof ToolStoreStatus)[ToolStoreStatusKey]; + Cancelled: 'cancelled', + Completed: 'completed', + Failed: 'failed', + Pending: 'pending', +} as const + +export type ToolStoreStatusKey = keyof typeof ToolStoreStatus +export type ToolStoreStatusValue = (typeof ToolStoreStatus)[ToolStoreStatusKey] diff --git a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts index 499d4ddc76d..973e3a0417d 100644 --- a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts @@ -16,963 +16,963 @@ // by the upstream OTel spec, not by this contract. 
export const TraceAttr = { - AbortBackend: "abort.backend", - AbortFound: "abort.found", - AbortRedisResult: "abort.redis_result", - AnalyticsAborted: "analytics.aborted", - AnalyticsBilledTotalCost: "analytics.billed_total_cost", - AnalyticsCacheReadTokens: "analytics.cache_read_tokens", - AnalyticsCacheWriteTokens: "analytics.cache_write_tokens", - AnalyticsCustomerType: "analytics.customer_type", - AnalyticsDurationMs: "analytics.duration_ms", - AnalyticsError: "analytics.error", - AnalyticsInputTokens: "analytics.input_tokens", - AnalyticsModel: "analytics.model", - AnalyticsOutputTokens: "analytics.output_tokens", - AnalyticsProvider: "analytics.provider", - AnalyticsSource: "analytics.source", - AnalyticsToolCallCount: "analytics.tool_call_count", - ApiKeyId: "api_key.id", - ApiKeyName: "api_key.name", - AuthIncomingInternal: "auth.incoming_internal", - AuthKeyMatch: "auth.key.match", - AuthKeyPreview: "auth.key.preview", - AuthKeySource: "auth.key.source", - AuthKeyType: "auth.key.type", - AuthProvider: "auth.provider", - AuthValidateStatusCode: "auth.validate.status_code", - AwsRegion: "aws.region", - BedrockErrorCode: "bedrock.error_code", - BedrockModelId: "bedrock.model_id", - BedrockRequestBodyBytesRetry: "bedrock.request.body_bytes_retry", - BillingAttempts: "billing.attempts", - BillingChangeType: "billing.change_type", - BillingCostInputUsd: "billing.cost.input_usd", - BillingCostOutputUsd: "billing.cost.output_usd", - BillingCostTotalUsd: "billing.cost.total_usd", - BillingCostUsd: "billing.cost_usd", - BillingCustomerType: "billing.customer_type", - BillingDuplicate: "billing.duplicate", - BillingDurationMs: "billing.duration_ms", - BillingHasIdempotencyKey: "billing.has_idempotency_key", - BillingIdempotencyKey: "billing.idempotency_key", - BillingInterval: "billing.interval", - BillingIsMcp: "billing.is_mcp", - BillingLlmCost: "billing.llm_cost", - BillingNewPlan: "billing.new_plan", - BillingOutcome: "billing.outcome", - BillingPlan: 
"billing.plan", - BillingPreviousPlan: "billing.previous_plan", - BillingServiceCharges: "billing.service_charges", - BillingSource: "billing.source", - BillingTotalCost: "billing.total_cost", - BillingUsageCurrent: "billing.usage.current", - BillingUsageExceeded: "billing.usage.exceeded", - BillingUsageLimit: "billing.usage.limit", - BlockId: "block.id", - BlockName: "block.name", - BlockType: "block.type", - ChatActiveMessagesBytes: "chat.active_messages_bytes", - ChatActiveMessagesCount: "chat.active_messages_count", - ChatAppendBytes: "chat.append_bytes", - ChatAppendCount: "chat.append_count", - ChatArtifactKeys: "chat.artifact_keys", - ChatArtifactsBytes: "chat.artifacts_bytes", - ChatAuthType: "chat.auth_type", - ChatContextCount: "chat.context_count", - ChatContextUsage: "chat.context_usage", - ChatContinuationMessagesBefore: "chat.continuation.messages_before", - ChatContinuationToolResultBytes: "chat.continuation.tool_result_bytes", - ChatContinuationToolResultFailure: "chat.continuation.tool_result_failure", - ChatContinuationToolResultSuccess: "chat.continuation.tool_result_success", - ChatContinuationToolResults: "chat.continuation.tool_results", - ChatContinuationTotalToolCalls: "chat.continuation.total_tool_calls", - ChatExistingMessageCount: "chat.existing_message_count", - ChatFileAttachmentCount: "chat.file_attachment_count", - ChatFinalizeOutcome: "chat.finalize.outcome", - ChatFound: "chat.found", - ChatHasAssistantMessage: "chat.has_assistant_message", - ChatHasOutputConfigs: "chat.has_output_configs", - ChatId: "chat.id", - ChatMessageBytes: "chat.message_bytes", - ChatMessagesAfter: "chat.messages_after", - ChatMessagesBytes: "chat.messages_bytes", - ChatMessagesCount: "chat.messages_count", - ChatPersistOutcome: "chat.persist.outcome", - ChatPreexisting: "chat.preexisting", - ChatRollbackIndex: "chat.rollback_index", - ChatTokensUsed: "chat.tokens_used", - ChatType: "chat.type", - ChatUserMessageId: "chat.user_message_id", - CheckpointAge: 
"checkpoint.age", - CheckpointAttemptsBytes: "checkpoint.attempts_bytes", - CheckpointBytesAssistantToolUse: "checkpoint.bytes.assistant_tool_use", - CheckpointBytesCurrentMessages: "checkpoint.bytes.current_messages", - CheckpointBytesImmediateResults: "checkpoint.bytes.immediate_results", - CheckpointBytesPendingToolCalls: "checkpoint.bytes.pending_tool_calls", - CheckpointBytesProviderRequest: "checkpoint.bytes.provider_request", - CheckpointBytesRequestContext: "checkpoint.bytes.request_context", - CheckpointBytesToolUsage: "checkpoint.bytes.tool_usage", - CheckpointCachedCredentialsBytes: "checkpoint.cached_credentials_bytes", - CheckpointClaimed: "checkpoint.claimed", - CheckpointClaimedNow: "checkpoint.claimed_now", - CheckpointCompletedBytes: "checkpoint.completed_bytes", - CheckpointCompletedSteps: "checkpoint.completed_steps", - CheckpointCurrentMessages: "checkpoint.current_messages", - CheckpointDecisionsBytes: "checkpoint.decisions_bytes", - CheckpointFound: "checkpoint.found", - CheckpointFrames: "checkpoint.frames", - CheckpointId: "checkpoint.id", - CheckpointImmediateResults: "checkpoint.immediate_results", - CheckpointMessageId: "checkpoint.message_id", - CheckpointPendingBytes: "checkpoint.pending_bytes", - CheckpointPendingSteps: "checkpoint.pending_steps", - CheckpointPendingToolCount: "checkpoint.pending_tool_count", - CheckpointRows: "checkpoint.rows", - CheckpointTaskId: "checkpoint.task_id", - CheckpointTotalToolCalls: "checkpoint.total_tool_calls", - CheckpointWorkflowSnapshotBytes: "checkpoint.workflow_snapshot_bytes", - ClientVersion: "client.version", - ConditionId: "condition.id", - ConditionName: "condition.name", - ConditionResult: "condition.result", - ContextReduceBudgetChars: "context.reduce.budget_chars", - ContextReduceCaller: "context.reduce.caller", - ContextReduceDidReduce: "context.reduce.did_reduce", - ContextReduceInputChars: "context.reduce.input_chars", - ContextReduceInputMessages: "context.reduce.input_messages", - 
ContextReduceOutcome: "context.reduce.outcome", - ContextReduceOutputChars: "context.reduce.output_chars", - ContextReduceOutputMessages: "context.reduce.output_messages", - ContextReduced: "context.reduced", - ContextSummarizeInputChars: "context.summarize.input_chars", - ContextSummarizeOutputChars: "context.summarize.output_chars", - CopilotAbortControllerFired: "copilot.abort.controller_fired", - CopilotAbortGoMarkerOk: "copilot.abort.go_marker_ok", - CopilotAbortLocalAborted: "copilot.abort.local_aborted", - CopilotAbortMarkerWritten: "copilot.abort.marker_written", - CopilotAbortOutcome: "copilot.abort.outcome", - CopilotAbortUnknownReason: "copilot.abort.unknown_reason", - CopilotAsyncToolClaimedBy: "copilot.async_tool.claimed_by", - CopilotAsyncToolHasError: "copilot.async_tool.has_error", - CopilotAsyncToolIdsCount: "copilot.async_tool.ids_count", - CopilotAsyncToolStatus: "copilot.async_tool.status", - CopilotAsyncToolWorkerId: "copilot.async_tool.worker_id", - CopilotBranchKind: "copilot.branch.kind", - CopilotChatIsNew: "copilot.chat.is_new", - CopilotCommandsCount: "copilot.commands.count", - CopilotConfirmOutcome: "copilot.confirm.outcome", - CopilotContextsCount: "copilot.contexts.count", - CopilotFileAttachmentsCount: "copilot.file_attachments.count", - CopilotFinalizeOutcome: "copilot.finalize.outcome", - CopilotInterruptedPriorStream: "copilot.interrupted_prior_stream", - CopilotLeg: "copilot.leg", - CopilotMode: "copilot.mode", - CopilotOperation: "copilot.operation", - CopilotOutputFileBytes: "copilot.output_file.bytes", - CopilotOutputFileFormat: "copilot.output_file.format", - CopilotOutputFileId: "copilot.output_file.id", - CopilotOutputFileName: "copilot.output_file.name", - CopilotOutputFileOutcome: "copilot.output_file.outcome", - CopilotPendingStreamWaitMs: "copilot.pending_stream.wait_ms", - CopilotPrefetch: "copilot.prefetch", - CopilotPublisherClientDisconnected: "copilot.publisher.client_disconnected", - CopilotPublisherSawComplete: 
"copilot.publisher.saw_complete", - CopilotRecoveryLatestSeq: "copilot.recovery.latest_seq", - CopilotRecoveryOldestSeq: "copilot.recovery.oldest_seq", - CopilotRecoveryOutcome: "copilot.recovery.outcome", - CopilotRecoveryRequestedAfterSeq: "copilot.recovery.requested_after_seq", - CopilotRequestCancelReason: "copilot.request.cancel_reason", - CopilotRequestOutcome: "copilot.request.outcome", - CopilotResourceAttachmentsCount: "copilot.resource_attachments.count", - CopilotResourcesAborted: "copilot.resources.aborted", - CopilotResourcesOp: "copilot.resources.op", - CopilotResourcesRemovedCount: "copilot.resources.removed_count", - CopilotResourcesUpsertedCount: "copilot.resources.upserted_count", - CopilotResultContentBlocks: "copilot.result.content_blocks", - CopilotResultContentLength: "copilot.result.content_length", - CopilotResultToolCalls: "copilot.result.tool_calls", - CopilotResumeAfterCursor: "copilot.resume.after_cursor", - CopilotResumeDurationMs: "copilot.resume.duration_ms", - CopilotResumeEventCount: "copilot.resume.event_count", - CopilotResumeOutcome: "copilot.resume.outcome", - CopilotResumePollIterations: "copilot.resume.poll_iterations", - CopilotResumePreviewSessionCount: "copilot.resume.preview_session_count", - CopilotRoute: "copilot.route", - CopilotRunAgent: "copilot.run.agent", - CopilotRunHasCompletedAt: "copilot.run.has_completed_at", - CopilotRunHasError: "copilot.run.has_error", - CopilotRunModel: "copilot.run.model", - CopilotRunParentId: "copilot.run.parent_id", - CopilotRunProvider: "copilot.run.provider", - CopilotRunStatus: "copilot.run.status", - CopilotStopAppendedAssistant: "copilot.stop.appended_assistant", - CopilotStopBlocksCount: "copilot.stop.blocks_count", - CopilotStopContentLength: "copilot.stop.content_length", - CopilotStopOutcome: "copilot.stop.outcome", - CopilotStream: "copilot.stream", - CopilotSurface: "copilot.surface", - CopilotTableId: "copilot.table.id", - CopilotTableOutcome: "copilot.table.outcome", - 
CopilotTableRowCount: "copilot.table.row_count", - CopilotTableSourceContentBytes: "copilot.table.source.content_bytes", - CopilotTableSourceFormat: "copilot.table.source.format", - CopilotTableSourcePath: "copilot.table.source.path", - CopilotTraceSpanCount: "copilot.trace.span_count", - CopilotTransport: "copilot.transport", - CopilotUserMessagePreview: "copilot.user.message_preview", - CopilotValidateOutcome: "copilot.validate.outcome", - CopilotVfsFileExtension: "copilot.vfs.file.extension", - CopilotVfsFileMediaType: "copilot.vfs.file.media_type", - CopilotVfsFileName: "copilot.vfs.file.name", - CopilotVfsFileSizeBytes: "copilot.vfs.file.size_bytes", - CopilotVfsHasAlpha: "copilot.vfs.has_alpha", - CopilotVfsInputBytes: "copilot.vfs.input.bytes", - CopilotVfsInputHeight: "copilot.vfs.input.height", - CopilotVfsInputMediaTypeClaimed: "copilot.vfs.input.media_type_claimed", - CopilotVfsInputMediaTypeDetected: "copilot.vfs.input.media_type_detected", - CopilotVfsInputWidth: "copilot.vfs.input.width", - CopilotVfsMetadataFailed: "copilot.vfs.metadata.failed", - CopilotVfsOutcome: "copilot.vfs.outcome", - CopilotVfsOutputBytes: "copilot.vfs.output.bytes", - CopilotVfsOutputMediaType: "copilot.vfs.output.media_type", - CopilotVfsReadImageResized: "copilot.vfs.read.image.resized", - CopilotVfsReadOutcome: "copilot.vfs.read.outcome", - CopilotVfsReadOutputBytes: "copilot.vfs.read.output.bytes", - CopilotVfsReadOutputLines: "copilot.vfs.read.output.lines", - CopilotVfsReadOutputMediaType: "copilot.vfs.read.output.media_type", - CopilotVfsReadPath: "copilot.vfs.read.path", - CopilotVfsResizeAttempts: "copilot.vfs.resize.attempts", - CopilotVfsResizeChosenDimension: "copilot.vfs.resize.chosen_dimension", - CopilotVfsResizeChosenQuality: "copilot.vfs.resize.chosen_quality", - CopilotVfsResized: "copilot.vfs.resized", - CopilotVfsSharpLoadFailed: "copilot.vfs.sharp.load_failed", - CostDefaultCost: "cost.default_cost", - CredentialSetId: "credential_set.id", - 
CredentialSetName: "credential_set.name", - DbOperation: "db.operation", - DbSqlTable: "db.sql.table", - DbSystem: "db.system", - DeploymentEnvironment: "deployment.environment", - DeploymentVersion: "deployment.version", - DocumentFileSize: "document.file_size", - DocumentMimeType: "document.mime_type", - DocumentsCount: "documents.count", - DocumentsUploadType: "documents.upload_type", - Error: "error", - ErrorCode: "error.code", - ErrorInternal: "error.internal", - ErrorMessage: "error.message", - ErrorType: "error.type", - EventName: "event.name", - EventTimestamp: "event.timestamp", - ExecutionBlocksExecuted: "execution.blocks_executed", - ExecutionDurationMs: "execution.duration_ms", - ExecutionErrorMessage: "execution.error_message", - ExecutionHasErrors: "execution.has_errors", - ExecutionStatus: "execution.status", - ExecutionTotalCost: "execution.total_cost", - ExecutionTrigger: "execution.trigger", - FunctionExecutionTimeMs: "function.execution_time_ms", - FunctionId: "function.id", - FunctionName: "function.name", - GenAiAgentId: "gen_ai.agent.id", - GenAiAgentName: "gen_ai.agent.name", - GenAiCostInput: "gen_ai.cost.input", - GenAiCostOutput: "gen_ai.cost.output", - GenAiCostTotal: "gen_ai.cost.total", - GenAiInputMessages: "gen_ai.input.messages", - GenAiOperationName: "gen_ai.operation.name", - GenAiOutputMessages: "gen_ai.output.messages", - GenAiRequestAssistantMessages: "gen_ai.request.assistant_messages", - GenAiRequestContentBlocks: "gen_ai.request.content_blocks", - GenAiRequestHasCacheControl: "gen_ai.request.has_cache_control", - GenAiRequestImageBlocks: "gen_ai.request.image_blocks", - GenAiRequestImageDataBytes: "gen_ai.request.image_data_bytes", - GenAiRequestMaxMessageBlocks: "gen_ai.request.max_message_blocks", - GenAiRequestMessagesCount: "gen_ai.request.messages.count", - GenAiRequestModel: "gen_ai.request.model", - GenAiRequestSystemChars: "gen_ai.request.system_chars", - GenAiRequestTextBlocks: "gen_ai.request.text_blocks", - 
GenAiRequestToolResultBlocks: "gen_ai.request.tool_result_blocks", - GenAiRequestToolUseBlocks: "gen_ai.request.tool_use_blocks", - GenAiRequestToolsCount: "gen_ai.request.tools.count", - GenAiRequestUserMessages: "gen_ai.request.user_messages", - GenAiSystem: "gen_ai.system", - GenAiToolName: "gen_ai.tool.name", - GenAiUsageCacheCreationTokens: "gen_ai.usage.cache_creation_tokens", - GenAiUsageCacheReadTokens: "gen_ai.usage.cache_read_tokens", - GenAiUsageInputTokens: "gen_ai.usage.input_tokens", - GenAiUsageOutputTokens: "gen_ai.usage.output_tokens", - GenAiUsageTotalTokens: "gen_ai.usage.total_tokens", - GenAiWorkflowExecutionId: "gen_ai.workflow.execution_id", - GenAiWorkflowId: "gen_ai.workflow.id", - GenAiWorkflowName: "gen_ai.workflow.name", - HostedKeyEnvVar: "hosted_key.env_var", - HttpHost: "http.host", - HttpMethod: "http.method", - HttpPath: "http.path", - HttpRemoteAddr: "http.remote_addr", - HttpRequestContentLength: "http.request.content_length", - HttpResponseBodyBytes: "http.response.body_bytes", - HttpResponseContentLength: "http.response.content_length", - HttpResponseHeadersMs: "http.response.headers_ms", - HttpResponseTotalMs: "http.response.total_ms", - HttpServerDurationMs: "http.server.duration_ms", - HttpStatusCode: "http.status_code", - HttpTarget: "http.target", - HttpUrl: "http.url", - HttpUserAgent: "http.user_agent", - InvitationRole: "invitation.role", - KnowledgeBaseId: "knowledge_base.id", - KnowledgeBaseName: "knowledge_base.name", - LlmErrorStage: "llm.error_stage", - LlmRequestBodyBytes: "llm.request.body_bytes", - LlmStreamBytes: "llm.stream.bytes", - LlmStreamChunks: "llm.stream.chunks", - LlmStreamFirstChunkBytes: "llm.stream.first_chunk_bytes", - LlmStreamFirstChunkMs: "llm.stream.first_chunk_ms", - LlmStreamOpenMs: "llm.stream.open_ms", - LlmStreamTotalMs: "llm.stream.total_ms", - LockAcquired: "lock.acquired", - LockBackend: "lock.backend", - LockTimedOut: "lock.timed_out", - LockTimeoutMs: "lock.timeout_ms", - LoopId: 
"loop.id", - LoopIterations: "loop.iterations", - LoopName: "loop.name", - McpExecutionStatus: "mcp.execution_status", - McpServerId: "mcp.server_id", - McpServerName: "mcp.server_name", - McpToolName: "mcp.tool_name", - McpTransport: "mcp.transport", - MemberRole: "member.role", - MemoryContentBytes: "memory.content_bytes", - MemoryFound: "memory.found", - MemoryPath: "memory.path", - MemoryRowCount: "memory.row_count", - MessageId: "message.id", - MessagingDestinationName: "messaging.destination.name", - MessagingSystem: "messaging.system", - ModelDurationMs: "model.duration_ms", - ModelId: "model.id", - ModelName: "model.name", - MothershipOrigin: "mothership.origin", - NetPeerName: "net.peer.name", - OauthProvider: "oauth.provider", - ParallelBranches: "parallel.branches", - ParallelId: "parallel.id", - ParallelName: "parallel.name", - PrefsToolCount: "prefs.tool_count", - ProcessingChunkSize: "processing.chunk_size", - ProcessingRecipe: "processing.recipe", - ProviderId: "provider.id", - RateLimitAttempt: "rate_limit.attempt", - RateLimitCount: "rate_limit.count", - RateLimitDelayMs: "rate_limit.delay_ms", - RateLimitLimit: "rate_limit.limit", - RateLimitMaxRetries: "rate_limit.max_retries", - RateLimitOutcome: "rate_limit.outcome", - RateLimitRetryAfterMs: "rate_limit.retry_after_ms", - RequestGoTraceId: "request.go_trace_id", - RequestId: "request.id", - RequiredVersion: "required.version", - ResumeRequestBodyBytes: "resume.request.body_bytes", - ResumeResultsCount: "resume.results.count", - ResumeResultsDataBytes: "resume.results.data_bytes", - ResumeResultsFailureCount: "resume.results.failure_count", - ResumeResultsSuccessCount: "resume.results.success_count", - RouterBackendName: "router.backend_name", - RouterBedrockEnabled: "router.bedrock_enabled", - RouterBedrockSupportedModel: "router.bedrock_supported_model", - RouterId: "router.id", - RouterName: "router.name", - RouterSelectedBackend: "router.selected_backend", - RouterSelectedPath: 
"router.selected_path", - RunId: "run.id", - SearchResultsCount: "search.results_count", - ServiceInstanceId: "service.instance.id", - ServiceName: "service.name", - ServiceNamespace: "service.namespace", - ServiceVersion: "service.version", - SettleCompleted: "settle.completed", - SettleTimeoutMs: "settle.timeout_ms", - SettleWaitMs: "settle.wait_ms", - SimOperation: "sim.operation", - SimRequestId: "sim.request_id", - SpanDurationMs: "span.duration_ms", - SpanStatus: "span.status", - SpanType: "span.type", - StreamId: "stream.id", - SubagentId: "subagent.id", - SubagentOutcomeContentBytes: "subagent.outcome.content_bytes", - SubagentOutcomeError: "subagent.outcome.error", - SubagentOutcomeStructuredType: "subagent.outcome.structured_type", - SubagentOutcomeSuccess: "subagent.outcome.success", - SubagentOutcomeToolCallCount: "subagent.outcome.tool_call_count", - TaskAge: "task.age", - TaskDecisionCount: "task.decision_count", - TaskErrorCount: "task.error_count", - TaskFound: "task.found", - TaskId: "task.id", - TaskListLimit: "task.list_limit", - TaskRows: "task.rows", - TaskStatus: "task.status", - TaskStepCount: "task.step_count", - TelemetrySdkLanguage: "telemetry.sdk.language", - TelemetrySdkName: "telemetry.sdk.name", - TelemetrySdkVersion: "telemetry.sdk.version", - TemplateId: "template.id", - TemplateName: "template.name", - ThrottleReason: "throttle.reason", - ToolArgsBytes: "tool.args.bytes", - ToolArgsCount: "tool.args.count", - ToolArgsPreview: "tool.args.preview", - ToolAsyncWaiterPollCount: "tool.async_waiter.poll_count", - ToolAsyncWaiterPubsubDeliveries: "tool.async_waiter.pubsub_deliveries", - ToolAsyncWaiterResolution: "tool.async_waiter.resolution", - ToolCallId: "tool.call_id", - ToolClientExecutable: "tool.client_executable", - ToolCompletionReceived: "tool.completion.received", - ToolConfirmationStatus: "tool.confirmation.status", - ToolDurationMs: "tool.duration_ms", - ToolErrorKind: "tool.error_kind", - ToolExecutor: "tool.executor", - 
ToolExternalService: "tool.external.service", - ToolId: "tool.id", - ToolName: "tool.name", - ToolOutcome: "tool.outcome", - ToolOutcomeMessage: "tool.outcome.message", - ToolParentSpan: "tool.parent_span", - ToolPayloadBytes: "tool.payload.bytes", - ToolResultArtifact: "tool.result.artifact", - ToolResultBytes: "tool.result.bytes", - ToolResultSuccess: "tool.result.success", - ToolScheduled: "tool.scheduled", - ToolStatus: "tool.status", - ToolStatusCode: "tool.status_code", - ToolStoreStatus: "tool.store_status", - ToolSync: "tool.sync", - ToolTimeoutMs: "tool.timeout_ms", - TraceAborted: "trace.aborted", - TraceBilledTotalCost: "trace.billed_total_cost", - TraceCacheReadTokens: "trace.cache_read_tokens", - TraceCacheWriteTokens: "trace.cache_write_tokens", - TraceDurationMs: "trace.duration_ms", - TraceError: "trace.error", - TraceGoId: "trace.go_id", - TraceInputTokens: "trace.input_tokens", - TraceModel: "trace.model", - TraceOutcome: "trace.outcome", - TraceOutputTokens: "trace.output_tokens", - TraceProvider: "trace.provider", - TraceRawTotalCost: "trace.raw_total_cost", - TraceSpanCount: "trace.span_count", - TraceToolCallCount: "trace.tool_call_count", - UserAuthMethod: "user.auth_method", - UserAuthProvider: "user.auth_provider", - UserId: "user.id", - WebhookId: "webhook.id", - WebhookProvider: "webhook.provider", - WebhookTriggerSuccess: "webhook.trigger_success", - WorkflowBlockTypes: "workflow.block_types", - WorkflowBlocksCount: "workflow.blocks_count", - WorkflowCreatedId: "workflow.created_id", - WorkflowDurationMs: "workflow.duration_ms", - WorkflowEdgesCount: "workflow.edges_count", - WorkflowExecutionId: "workflow.execution_id", - WorkflowHasFolder: "workflow.has_folder", - WorkflowHasWorkspace: "workflow.has_workspace", - WorkflowId: "workflow.id", - WorkflowLoopsCount: "workflow.loops_count", - WorkflowName: "workflow.name", - WorkflowNewId: "workflow.new_id", - WorkflowParallelsCount: "workflow.parallels_count", - WorkflowSourceId: 
"workflow.source_id", - WorkflowTrigger: "workflow.trigger", - WorkspaceId: "workspace.id", - WorkspaceName: "workspace.name", -} as const; + AbortBackend: 'abort.backend', + AbortFound: 'abort.found', + AbortRedisResult: 'abort.redis_result', + AnalyticsAborted: 'analytics.aborted', + AnalyticsBilledTotalCost: 'analytics.billed_total_cost', + AnalyticsCacheReadTokens: 'analytics.cache_read_tokens', + AnalyticsCacheWriteTokens: 'analytics.cache_write_tokens', + AnalyticsCustomerType: 'analytics.customer_type', + AnalyticsDurationMs: 'analytics.duration_ms', + AnalyticsError: 'analytics.error', + AnalyticsInputTokens: 'analytics.input_tokens', + AnalyticsModel: 'analytics.model', + AnalyticsOutputTokens: 'analytics.output_tokens', + AnalyticsProvider: 'analytics.provider', + AnalyticsSource: 'analytics.source', + AnalyticsToolCallCount: 'analytics.tool_call_count', + ApiKeyId: 'api_key.id', + ApiKeyName: 'api_key.name', + AuthIncomingInternal: 'auth.incoming_internal', + AuthKeyMatch: 'auth.key.match', + AuthKeyPreview: 'auth.key.preview', + AuthKeySource: 'auth.key.source', + AuthKeyType: 'auth.key.type', + AuthProvider: 'auth.provider', + AuthValidateStatusCode: 'auth.validate.status_code', + AwsRegion: 'aws.region', + BedrockErrorCode: 'bedrock.error_code', + BedrockModelId: 'bedrock.model_id', + BedrockRequestBodyBytesRetry: 'bedrock.request.body_bytes_retry', + BillingAttempts: 'billing.attempts', + BillingChangeType: 'billing.change_type', + BillingCostInputUsd: 'billing.cost.input_usd', + BillingCostOutputUsd: 'billing.cost.output_usd', + BillingCostTotalUsd: 'billing.cost.total_usd', + BillingCostUsd: 'billing.cost_usd', + BillingCustomerType: 'billing.customer_type', + BillingDuplicate: 'billing.duplicate', + BillingDurationMs: 'billing.duration_ms', + BillingHasIdempotencyKey: 'billing.has_idempotency_key', + BillingIdempotencyKey: 'billing.idempotency_key', + BillingInterval: 'billing.interval', + BillingIsMcp: 'billing.is_mcp', + BillingLlmCost: 
'billing.llm_cost', + BillingNewPlan: 'billing.new_plan', + BillingOutcome: 'billing.outcome', + BillingPlan: 'billing.plan', + BillingPreviousPlan: 'billing.previous_plan', + BillingServiceCharges: 'billing.service_charges', + BillingSource: 'billing.source', + BillingTotalCost: 'billing.total_cost', + BillingUsageCurrent: 'billing.usage.current', + BillingUsageExceeded: 'billing.usage.exceeded', + BillingUsageLimit: 'billing.usage.limit', + BlockId: 'block.id', + BlockName: 'block.name', + BlockType: 'block.type', + ChatActiveMessagesBytes: 'chat.active_messages_bytes', + ChatActiveMessagesCount: 'chat.active_messages_count', + ChatAppendBytes: 'chat.append_bytes', + ChatAppendCount: 'chat.append_count', + ChatArtifactKeys: 'chat.artifact_keys', + ChatArtifactsBytes: 'chat.artifacts_bytes', + ChatAuthType: 'chat.auth_type', + ChatContextCount: 'chat.context_count', + ChatContextUsage: 'chat.context_usage', + ChatContinuationMessagesBefore: 'chat.continuation.messages_before', + ChatContinuationToolResultBytes: 'chat.continuation.tool_result_bytes', + ChatContinuationToolResultFailure: 'chat.continuation.tool_result_failure', + ChatContinuationToolResultSuccess: 'chat.continuation.tool_result_success', + ChatContinuationToolResults: 'chat.continuation.tool_results', + ChatContinuationTotalToolCalls: 'chat.continuation.total_tool_calls', + ChatExistingMessageCount: 'chat.existing_message_count', + ChatFileAttachmentCount: 'chat.file_attachment_count', + ChatFinalizeOutcome: 'chat.finalize.outcome', + ChatFound: 'chat.found', + ChatHasAssistantMessage: 'chat.has_assistant_message', + ChatHasOutputConfigs: 'chat.has_output_configs', + ChatId: 'chat.id', + ChatMessageBytes: 'chat.message_bytes', + ChatMessagesAfter: 'chat.messages_after', + ChatMessagesBytes: 'chat.messages_bytes', + ChatMessagesCount: 'chat.messages_count', + ChatPersistOutcome: 'chat.persist.outcome', + ChatPreexisting: 'chat.preexisting', + ChatRollbackIndex: 'chat.rollback_index', + 
ChatTokensUsed: 'chat.tokens_used', + ChatType: 'chat.type', + ChatUserMessageId: 'chat.user_message_id', + CheckpointAge: 'checkpoint.age', + CheckpointAttemptsBytes: 'checkpoint.attempts_bytes', + CheckpointBytesAssistantToolUse: 'checkpoint.bytes.assistant_tool_use', + CheckpointBytesCurrentMessages: 'checkpoint.bytes.current_messages', + CheckpointBytesImmediateResults: 'checkpoint.bytes.immediate_results', + CheckpointBytesPendingToolCalls: 'checkpoint.bytes.pending_tool_calls', + CheckpointBytesProviderRequest: 'checkpoint.bytes.provider_request', + CheckpointBytesRequestContext: 'checkpoint.bytes.request_context', + CheckpointBytesToolUsage: 'checkpoint.bytes.tool_usage', + CheckpointCachedCredentialsBytes: 'checkpoint.cached_credentials_bytes', + CheckpointClaimed: 'checkpoint.claimed', + CheckpointClaimedNow: 'checkpoint.claimed_now', + CheckpointCompletedBytes: 'checkpoint.completed_bytes', + CheckpointCompletedSteps: 'checkpoint.completed_steps', + CheckpointCurrentMessages: 'checkpoint.current_messages', + CheckpointDecisionsBytes: 'checkpoint.decisions_bytes', + CheckpointFound: 'checkpoint.found', + CheckpointFrames: 'checkpoint.frames', + CheckpointId: 'checkpoint.id', + CheckpointImmediateResults: 'checkpoint.immediate_results', + CheckpointMessageId: 'checkpoint.message_id', + CheckpointPendingBytes: 'checkpoint.pending_bytes', + CheckpointPendingSteps: 'checkpoint.pending_steps', + CheckpointPendingToolCount: 'checkpoint.pending_tool_count', + CheckpointRows: 'checkpoint.rows', + CheckpointTaskId: 'checkpoint.task_id', + CheckpointTotalToolCalls: 'checkpoint.total_tool_calls', + CheckpointWorkflowSnapshotBytes: 'checkpoint.workflow_snapshot_bytes', + ClientVersion: 'client.version', + ConditionId: 'condition.id', + ConditionName: 'condition.name', + ConditionResult: 'condition.result', + ContextReduceBudgetChars: 'context.reduce.budget_chars', + ContextReduceCaller: 'context.reduce.caller', + ContextReduceDidReduce: 'context.reduce.did_reduce', + 
ContextReduceInputChars: 'context.reduce.input_chars', + ContextReduceInputMessages: 'context.reduce.input_messages', + ContextReduceOutcome: 'context.reduce.outcome', + ContextReduceOutputChars: 'context.reduce.output_chars', + ContextReduceOutputMessages: 'context.reduce.output_messages', + ContextReduced: 'context.reduced', + ContextSummarizeInputChars: 'context.summarize.input_chars', + ContextSummarizeOutputChars: 'context.summarize.output_chars', + CopilotAbortControllerFired: 'copilot.abort.controller_fired', + CopilotAbortGoMarkerOk: 'copilot.abort.go_marker_ok', + CopilotAbortLocalAborted: 'copilot.abort.local_aborted', + CopilotAbortMarkerWritten: 'copilot.abort.marker_written', + CopilotAbortOutcome: 'copilot.abort.outcome', + CopilotAbortUnknownReason: 'copilot.abort.unknown_reason', + CopilotAsyncToolClaimedBy: 'copilot.async_tool.claimed_by', + CopilotAsyncToolHasError: 'copilot.async_tool.has_error', + CopilotAsyncToolIdsCount: 'copilot.async_tool.ids_count', + CopilotAsyncToolStatus: 'copilot.async_tool.status', + CopilotAsyncToolWorkerId: 'copilot.async_tool.worker_id', + CopilotBranchKind: 'copilot.branch.kind', + CopilotChatIsNew: 'copilot.chat.is_new', + CopilotCommandsCount: 'copilot.commands.count', + CopilotConfirmOutcome: 'copilot.confirm.outcome', + CopilotContextsCount: 'copilot.contexts.count', + CopilotFileAttachmentsCount: 'copilot.file_attachments.count', + CopilotFinalizeOutcome: 'copilot.finalize.outcome', + CopilotInterruptedPriorStream: 'copilot.interrupted_prior_stream', + CopilotLeg: 'copilot.leg', + CopilotMode: 'copilot.mode', + CopilotOperation: 'copilot.operation', + CopilotOutputFileBytes: 'copilot.output_file.bytes', + CopilotOutputFileFormat: 'copilot.output_file.format', + CopilotOutputFileId: 'copilot.output_file.id', + CopilotOutputFileName: 'copilot.output_file.name', + CopilotOutputFileOutcome: 'copilot.output_file.outcome', + CopilotPendingStreamWaitMs: 'copilot.pending_stream.wait_ms', + CopilotPrefetch: 
'copilot.prefetch', + CopilotPublisherClientDisconnected: 'copilot.publisher.client_disconnected', + CopilotPublisherSawComplete: 'copilot.publisher.saw_complete', + CopilotRecoveryLatestSeq: 'copilot.recovery.latest_seq', + CopilotRecoveryOldestSeq: 'copilot.recovery.oldest_seq', + CopilotRecoveryOutcome: 'copilot.recovery.outcome', + CopilotRecoveryRequestedAfterSeq: 'copilot.recovery.requested_after_seq', + CopilotRequestCancelReason: 'copilot.request.cancel_reason', + CopilotRequestOutcome: 'copilot.request.outcome', + CopilotResourceAttachmentsCount: 'copilot.resource_attachments.count', + CopilotResourcesAborted: 'copilot.resources.aborted', + CopilotResourcesOp: 'copilot.resources.op', + CopilotResourcesRemovedCount: 'copilot.resources.removed_count', + CopilotResourcesUpsertedCount: 'copilot.resources.upserted_count', + CopilotResultContentBlocks: 'copilot.result.content_blocks', + CopilotResultContentLength: 'copilot.result.content_length', + CopilotResultToolCalls: 'copilot.result.tool_calls', + CopilotResumeAfterCursor: 'copilot.resume.after_cursor', + CopilotResumeDurationMs: 'copilot.resume.duration_ms', + CopilotResumeEventCount: 'copilot.resume.event_count', + CopilotResumeOutcome: 'copilot.resume.outcome', + CopilotResumePollIterations: 'copilot.resume.poll_iterations', + CopilotResumePreviewSessionCount: 'copilot.resume.preview_session_count', + CopilotRoute: 'copilot.route', + CopilotRunAgent: 'copilot.run.agent', + CopilotRunHasCompletedAt: 'copilot.run.has_completed_at', + CopilotRunHasError: 'copilot.run.has_error', + CopilotRunModel: 'copilot.run.model', + CopilotRunParentId: 'copilot.run.parent_id', + CopilotRunProvider: 'copilot.run.provider', + CopilotRunStatus: 'copilot.run.status', + CopilotStopAppendedAssistant: 'copilot.stop.appended_assistant', + CopilotStopBlocksCount: 'copilot.stop.blocks_count', + CopilotStopContentLength: 'copilot.stop.content_length', + CopilotStopOutcome: 'copilot.stop.outcome', + CopilotStream: 'copilot.stream', 
+ CopilotSurface: 'copilot.surface', + CopilotTableId: 'copilot.table.id', + CopilotTableOutcome: 'copilot.table.outcome', + CopilotTableRowCount: 'copilot.table.row_count', + CopilotTableSourceContentBytes: 'copilot.table.source.content_bytes', + CopilotTableSourceFormat: 'copilot.table.source.format', + CopilotTableSourcePath: 'copilot.table.source.path', + CopilotTraceSpanCount: 'copilot.trace.span_count', + CopilotTransport: 'copilot.transport', + CopilotUserMessagePreview: 'copilot.user.message_preview', + CopilotValidateOutcome: 'copilot.validate.outcome', + CopilotVfsFileExtension: 'copilot.vfs.file.extension', + CopilotVfsFileMediaType: 'copilot.vfs.file.media_type', + CopilotVfsFileName: 'copilot.vfs.file.name', + CopilotVfsFileSizeBytes: 'copilot.vfs.file.size_bytes', + CopilotVfsHasAlpha: 'copilot.vfs.has_alpha', + CopilotVfsInputBytes: 'copilot.vfs.input.bytes', + CopilotVfsInputHeight: 'copilot.vfs.input.height', + CopilotVfsInputMediaTypeClaimed: 'copilot.vfs.input.media_type_claimed', + CopilotVfsInputMediaTypeDetected: 'copilot.vfs.input.media_type_detected', + CopilotVfsInputWidth: 'copilot.vfs.input.width', + CopilotVfsMetadataFailed: 'copilot.vfs.metadata.failed', + CopilotVfsOutcome: 'copilot.vfs.outcome', + CopilotVfsOutputBytes: 'copilot.vfs.output.bytes', + CopilotVfsOutputMediaType: 'copilot.vfs.output.media_type', + CopilotVfsReadImageResized: 'copilot.vfs.read.image.resized', + CopilotVfsReadOutcome: 'copilot.vfs.read.outcome', + CopilotVfsReadOutputBytes: 'copilot.vfs.read.output.bytes', + CopilotVfsReadOutputLines: 'copilot.vfs.read.output.lines', + CopilotVfsReadOutputMediaType: 'copilot.vfs.read.output.media_type', + CopilotVfsReadPath: 'copilot.vfs.read.path', + CopilotVfsResizeAttempts: 'copilot.vfs.resize.attempts', + CopilotVfsResizeChosenDimension: 'copilot.vfs.resize.chosen_dimension', + CopilotVfsResizeChosenQuality: 'copilot.vfs.resize.chosen_quality', + CopilotVfsResized: 'copilot.vfs.resized', + CopilotVfsSharpLoadFailed: 
'copilot.vfs.sharp.load_failed', + CostDefaultCost: 'cost.default_cost', + CredentialSetId: 'credential_set.id', + CredentialSetName: 'credential_set.name', + DbOperation: 'db.operation', + DbSqlTable: 'db.sql.table', + DbSystem: 'db.system', + DeploymentEnvironment: 'deployment.environment', + DeploymentVersion: 'deployment.version', + DocumentFileSize: 'document.file_size', + DocumentMimeType: 'document.mime_type', + DocumentsCount: 'documents.count', + DocumentsUploadType: 'documents.upload_type', + Error: 'error', + ErrorCode: 'error.code', + ErrorInternal: 'error.internal', + ErrorMessage: 'error.message', + ErrorType: 'error.type', + EventName: 'event.name', + EventTimestamp: 'event.timestamp', + ExecutionBlocksExecuted: 'execution.blocks_executed', + ExecutionDurationMs: 'execution.duration_ms', + ExecutionErrorMessage: 'execution.error_message', + ExecutionHasErrors: 'execution.has_errors', + ExecutionStatus: 'execution.status', + ExecutionTotalCost: 'execution.total_cost', + ExecutionTrigger: 'execution.trigger', + FunctionExecutionTimeMs: 'function.execution_time_ms', + FunctionId: 'function.id', + FunctionName: 'function.name', + GenAiAgentId: 'gen_ai.agent.id', + GenAiAgentName: 'gen_ai.agent.name', + GenAiCostInput: 'gen_ai.cost.input', + GenAiCostOutput: 'gen_ai.cost.output', + GenAiCostTotal: 'gen_ai.cost.total', + GenAiInputMessages: 'gen_ai.input.messages', + GenAiOperationName: 'gen_ai.operation.name', + GenAiOutputMessages: 'gen_ai.output.messages', + GenAiRequestAssistantMessages: 'gen_ai.request.assistant_messages', + GenAiRequestContentBlocks: 'gen_ai.request.content_blocks', + GenAiRequestHasCacheControl: 'gen_ai.request.has_cache_control', + GenAiRequestImageBlocks: 'gen_ai.request.image_blocks', + GenAiRequestImageDataBytes: 'gen_ai.request.image_data_bytes', + GenAiRequestMaxMessageBlocks: 'gen_ai.request.max_message_blocks', + GenAiRequestMessagesCount: 'gen_ai.request.messages.count', + GenAiRequestModel: 'gen_ai.request.model', + 
GenAiRequestSystemChars: 'gen_ai.request.system_chars', + GenAiRequestTextBlocks: 'gen_ai.request.text_blocks', + GenAiRequestToolResultBlocks: 'gen_ai.request.tool_result_blocks', + GenAiRequestToolUseBlocks: 'gen_ai.request.tool_use_blocks', + GenAiRequestToolsCount: 'gen_ai.request.tools.count', + GenAiRequestUserMessages: 'gen_ai.request.user_messages', + GenAiSystem: 'gen_ai.system', + GenAiToolName: 'gen_ai.tool.name', + GenAiUsageCacheCreationTokens: 'gen_ai.usage.cache_creation_tokens', + GenAiUsageCacheReadTokens: 'gen_ai.usage.cache_read_tokens', + GenAiUsageInputTokens: 'gen_ai.usage.input_tokens', + GenAiUsageOutputTokens: 'gen_ai.usage.output_tokens', + GenAiUsageTotalTokens: 'gen_ai.usage.total_tokens', + GenAiWorkflowExecutionId: 'gen_ai.workflow.execution_id', + GenAiWorkflowId: 'gen_ai.workflow.id', + GenAiWorkflowName: 'gen_ai.workflow.name', + HostedKeyEnvVar: 'hosted_key.env_var', + HttpHost: 'http.host', + HttpMethod: 'http.method', + HttpPath: 'http.path', + HttpRemoteAddr: 'http.remote_addr', + HttpRequestContentLength: 'http.request.content_length', + HttpResponseBodyBytes: 'http.response.body_bytes', + HttpResponseContentLength: 'http.response.content_length', + HttpResponseHeadersMs: 'http.response.headers_ms', + HttpResponseTotalMs: 'http.response.total_ms', + HttpServerDurationMs: 'http.server.duration_ms', + HttpStatusCode: 'http.status_code', + HttpTarget: 'http.target', + HttpUrl: 'http.url', + HttpUserAgent: 'http.user_agent', + InvitationRole: 'invitation.role', + KnowledgeBaseId: 'knowledge_base.id', + KnowledgeBaseName: 'knowledge_base.name', + LlmErrorStage: 'llm.error_stage', + LlmRequestBodyBytes: 'llm.request.body_bytes', + LlmStreamBytes: 'llm.stream.bytes', + LlmStreamChunks: 'llm.stream.chunks', + LlmStreamFirstChunkBytes: 'llm.stream.first_chunk_bytes', + LlmStreamFirstChunkMs: 'llm.stream.first_chunk_ms', + LlmStreamOpenMs: 'llm.stream.open_ms', + LlmStreamTotalMs: 'llm.stream.total_ms', + LockAcquired: 'lock.acquired', + 
LockBackend: 'lock.backend', + LockTimedOut: 'lock.timed_out', + LockTimeoutMs: 'lock.timeout_ms', + LoopId: 'loop.id', + LoopIterations: 'loop.iterations', + LoopName: 'loop.name', + McpExecutionStatus: 'mcp.execution_status', + McpServerId: 'mcp.server_id', + McpServerName: 'mcp.server_name', + McpToolName: 'mcp.tool_name', + McpTransport: 'mcp.transport', + MemberRole: 'member.role', + MemoryContentBytes: 'memory.content_bytes', + MemoryFound: 'memory.found', + MemoryPath: 'memory.path', + MemoryRowCount: 'memory.row_count', + MessageId: 'message.id', + MessagingDestinationName: 'messaging.destination.name', + MessagingSystem: 'messaging.system', + ModelDurationMs: 'model.duration_ms', + ModelId: 'model.id', + ModelName: 'model.name', + MothershipOrigin: 'mothership.origin', + NetPeerName: 'net.peer.name', + OauthProvider: 'oauth.provider', + ParallelBranches: 'parallel.branches', + ParallelId: 'parallel.id', + ParallelName: 'parallel.name', + PrefsToolCount: 'prefs.tool_count', + ProcessingChunkSize: 'processing.chunk_size', + ProcessingRecipe: 'processing.recipe', + ProviderId: 'provider.id', + RateLimitAttempt: 'rate_limit.attempt', + RateLimitCount: 'rate_limit.count', + RateLimitDelayMs: 'rate_limit.delay_ms', + RateLimitLimit: 'rate_limit.limit', + RateLimitMaxRetries: 'rate_limit.max_retries', + RateLimitOutcome: 'rate_limit.outcome', + RateLimitRetryAfterMs: 'rate_limit.retry_after_ms', + RequestGoTraceId: 'request.go_trace_id', + RequestId: 'request.id', + RequiredVersion: 'required.version', + ResumeRequestBodyBytes: 'resume.request.body_bytes', + ResumeResultsCount: 'resume.results.count', + ResumeResultsDataBytes: 'resume.results.data_bytes', + ResumeResultsFailureCount: 'resume.results.failure_count', + ResumeResultsSuccessCount: 'resume.results.success_count', + RouterBackendName: 'router.backend_name', + RouterBedrockEnabled: 'router.bedrock_enabled', + RouterBedrockSupportedModel: 'router.bedrock_supported_model', + RouterId: 'router.id', + 
RouterName: 'router.name', + RouterSelectedBackend: 'router.selected_backend', + RouterSelectedPath: 'router.selected_path', + RunId: 'run.id', + SearchResultsCount: 'search.results_count', + ServiceInstanceId: 'service.instance.id', + ServiceName: 'service.name', + ServiceNamespace: 'service.namespace', + ServiceVersion: 'service.version', + SettleCompleted: 'settle.completed', + SettleTimeoutMs: 'settle.timeout_ms', + SettleWaitMs: 'settle.wait_ms', + SimOperation: 'sim.operation', + SimRequestId: 'sim.request_id', + SpanDurationMs: 'span.duration_ms', + SpanStatus: 'span.status', + SpanType: 'span.type', + StreamId: 'stream.id', + SubagentId: 'subagent.id', + SubagentOutcomeContentBytes: 'subagent.outcome.content_bytes', + SubagentOutcomeError: 'subagent.outcome.error', + SubagentOutcomeStructuredType: 'subagent.outcome.structured_type', + SubagentOutcomeSuccess: 'subagent.outcome.success', + SubagentOutcomeToolCallCount: 'subagent.outcome.tool_call_count', + TaskAge: 'task.age', + TaskDecisionCount: 'task.decision_count', + TaskErrorCount: 'task.error_count', + TaskFound: 'task.found', + TaskId: 'task.id', + TaskListLimit: 'task.list_limit', + TaskRows: 'task.rows', + TaskStatus: 'task.status', + TaskStepCount: 'task.step_count', + TelemetrySdkLanguage: 'telemetry.sdk.language', + TelemetrySdkName: 'telemetry.sdk.name', + TelemetrySdkVersion: 'telemetry.sdk.version', + TemplateId: 'template.id', + TemplateName: 'template.name', + ThrottleReason: 'throttle.reason', + ToolArgsBytes: 'tool.args.bytes', + ToolArgsCount: 'tool.args.count', + ToolArgsPreview: 'tool.args.preview', + ToolAsyncWaiterPollCount: 'tool.async_waiter.poll_count', + ToolAsyncWaiterPubsubDeliveries: 'tool.async_waiter.pubsub_deliveries', + ToolAsyncWaiterResolution: 'tool.async_waiter.resolution', + ToolCallId: 'tool.call_id', + ToolClientExecutable: 'tool.client_executable', + ToolCompletionReceived: 'tool.completion.received', + ToolConfirmationStatus: 'tool.confirmation.status', + 
ToolDurationMs: 'tool.duration_ms', + ToolErrorKind: 'tool.error_kind', + ToolExecutor: 'tool.executor', + ToolExternalService: 'tool.external.service', + ToolId: 'tool.id', + ToolName: 'tool.name', + ToolOutcome: 'tool.outcome', + ToolOutcomeMessage: 'tool.outcome.message', + ToolParentSpan: 'tool.parent_span', + ToolPayloadBytes: 'tool.payload.bytes', + ToolResultArtifact: 'tool.result.artifact', + ToolResultBytes: 'tool.result.bytes', + ToolResultSuccess: 'tool.result.success', + ToolScheduled: 'tool.scheduled', + ToolStatus: 'tool.status', + ToolStatusCode: 'tool.status_code', + ToolStoreStatus: 'tool.store_status', + ToolSync: 'tool.sync', + ToolTimeoutMs: 'tool.timeout_ms', + TraceAborted: 'trace.aborted', + TraceBilledTotalCost: 'trace.billed_total_cost', + TraceCacheReadTokens: 'trace.cache_read_tokens', + TraceCacheWriteTokens: 'trace.cache_write_tokens', + TraceDurationMs: 'trace.duration_ms', + TraceError: 'trace.error', + TraceGoId: 'trace.go_id', + TraceInputTokens: 'trace.input_tokens', + TraceModel: 'trace.model', + TraceOutcome: 'trace.outcome', + TraceOutputTokens: 'trace.output_tokens', + TraceProvider: 'trace.provider', + TraceRawTotalCost: 'trace.raw_total_cost', + TraceSpanCount: 'trace.span_count', + TraceToolCallCount: 'trace.tool_call_count', + UserAuthMethod: 'user.auth_method', + UserAuthProvider: 'user.auth_provider', + UserId: 'user.id', + WebhookId: 'webhook.id', + WebhookProvider: 'webhook.provider', + WebhookTriggerSuccess: 'webhook.trigger_success', + WorkflowBlockTypes: 'workflow.block_types', + WorkflowBlocksCount: 'workflow.blocks_count', + WorkflowCreatedId: 'workflow.created_id', + WorkflowDurationMs: 'workflow.duration_ms', + WorkflowEdgesCount: 'workflow.edges_count', + WorkflowExecutionId: 'workflow.execution_id', + WorkflowHasFolder: 'workflow.has_folder', + WorkflowHasWorkspace: 'workflow.has_workspace', + WorkflowId: 'workflow.id', + WorkflowLoopsCount: 'workflow.loops_count', + WorkflowName: 'workflow.name', + 
WorkflowNewId: 'workflow.new_id', + WorkflowParallelsCount: 'workflow.parallels_count', + WorkflowSourceId: 'workflow.source_id', + WorkflowTrigger: 'workflow.trigger', + WorkspaceId: 'workspace.id', + WorkspaceName: 'workspace.name', +} as const -export type TraceAttrKey = keyof typeof TraceAttr; -export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey]; +export type TraceAttrKey = keyof typeof TraceAttr +export type TraceAttrValue = (typeof TraceAttr)[TraceAttrKey] /** Readonly sorted list of every canonical custom attribute key. */ export const TraceAttrValues: readonly TraceAttrValue[] = [ - "abort.backend", - "abort.found", - "abort.redis_result", - "analytics.aborted", - "analytics.billed_total_cost", - "analytics.cache_read_tokens", - "analytics.cache_write_tokens", - "analytics.customer_type", - "analytics.duration_ms", - "analytics.error", - "analytics.input_tokens", - "analytics.model", - "analytics.output_tokens", - "analytics.provider", - "analytics.source", - "analytics.tool_call_count", - "api_key.id", - "api_key.name", - "auth.incoming_internal", - "auth.key.match", - "auth.key.preview", - "auth.key.source", - "auth.key.type", - "auth.provider", - "auth.validate.status_code", - "aws.region", - "bedrock.error_code", - "bedrock.model_id", - "bedrock.request.body_bytes_retry", - "billing.attempts", - "billing.change_type", - "billing.cost.input_usd", - "billing.cost.output_usd", - "billing.cost.total_usd", - "billing.cost_usd", - "billing.customer_type", - "billing.duplicate", - "billing.duration_ms", - "billing.has_idempotency_key", - "billing.idempotency_key", - "billing.interval", - "billing.is_mcp", - "billing.llm_cost", - "billing.new_plan", - "billing.outcome", - "billing.plan", - "billing.previous_plan", - "billing.service_charges", - "billing.source", - "billing.total_cost", - "billing.usage.current", - "billing.usage.exceeded", - "billing.usage.limit", - "block.id", - "block.name", - "block.type", - "chat.active_messages_bytes", - 
"chat.active_messages_count", - "chat.append_bytes", - "chat.append_count", - "chat.artifact_keys", - "chat.artifacts_bytes", - "chat.auth_type", - "chat.context_count", - "chat.context_usage", - "chat.continuation.messages_before", - "chat.continuation.tool_result_bytes", - "chat.continuation.tool_result_failure", - "chat.continuation.tool_result_success", - "chat.continuation.tool_results", - "chat.continuation.total_tool_calls", - "chat.existing_message_count", - "chat.file_attachment_count", - "chat.finalize.outcome", - "chat.found", - "chat.has_assistant_message", - "chat.has_output_configs", - "chat.id", - "chat.message_bytes", - "chat.messages_after", - "chat.messages_bytes", - "chat.messages_count", - "chat.persist.outcome", - "chat.preexisting", - "chat.rollback_index", - "chat.tokens_used", - "chat.type", - "chat.user_message_id", - "checkpoint.age", - "checkpoint.attempts_bytes", - "checkpoint.bytes.assistant_tool_use", - "checkpoint.bytes.current_messages", - "checkpoint.bytes.immediate_results", - "checkpoint.bytes.pending_tool_calls", - "checkpoint.bytes.provider_request", - "checkpoint.bytes.request_context", - "checkpoint.bytes.tool_usage", - "checkpoint.cached_credentials_bytes", - "checkpoint.claimed", - "checkpoint.claimed_now", - "checkpoint.completed_bytes", - "checkpoint.completed_steps", - "checkpoint.current_messages", - "checkpoint.decisions_bytes", - "checkpoint.found", - "checkpoint.frames", - "checkpoint.id", - "checkpoint.immediate_results", - "checkpoint.message_id", - "checkpoint.pending_bytes", - "checkpoint.pending_steps", - "checkpoint.pending_tool_count", - "checkpoint.rows", - "checkpoint.task_id", - "checkpoint.total_tool_calls", - "checkpoint.workflow_snapshot_bytes", - "client.version", - "condition.id", - "condition.name", - "condition.result", - "context.reduce.budget_chars", - "context.reduce.caller", - "context.reduce.did_reduce", - "context.reduce.input_chars", - "context.reduce.input_messages", - 
"context.reduce.outcome", - "context.reduce.output_chars", - "context.reduce.output_messages", - "context.reduced", - "context.summarize.input_chars", - "context.summarize.output_chars", - "copilot.abort.controller_fired", - "copilot.abort.go_marker_ok", - "copilot.abort.local_aborted", - "copilot.abort.marker_written", - "copilot.abort.outcome", - "copilot.abort.unknown_reason", - "copilot.async_tool.claimed_by", - "copilot.async_tool.has_error", - "copilot.async_tool.ids_count", - "copilot.async_tool.status", - "copilot.async_tool.worker_id", - "copilot.branch.kind", - "copilot.chat.is_new", - "copilot.commands.count", - "copilot.confirm.outcome", - "copilot.contexts.count", - "copilot.file_attachments.count", - "copilot.finalize.outcome", - "copilot.interrupted_prior_stream", - "copilot.leg", - "copilot.mode", - "copilot.operation", - "copilot.output_file.bytes", - "copilot.output_file.format", - "copilot.output_file.id", - "copilot.output_file.name", - "copilot.output_file.outcome", - "copilot.pending_stream.wait_ms", - "copilot.prefetch", - "copilot.publisher.client_disconnected", - "copilot.publisher.saw_complete", - "copilot.recovery.latest_seq", - "copilot.recovery.oldest_seq", - "copilot.recovery.outcome", - "copilot.recovery.requested_after_seq", - "copilot.request.cancel_reason", - "copilot.request.outcome", - "copilot.resource_attachments.count", - "copilot.resources.aborted", - "copilot.resources.op", - "copilot.resources.removed_count", - "copilot.resources.upserted_count", - "copilot.result.content_blocks", - "copilot.result.content_length", - "copilot.result.tool_calls", - "copilot.resume.after_cursor", - "copilot.resume.duration_ms", - "copilot.resume.event_count", - "copilot.resume.outcome", - "copilot.resume.poll_iterations", - "copilot.resume.preview_session_count", - "copilot.route", - "copilot.run.agent", - "copilot.run.has_completed_at", - "copilot.run.has_error", - "copilot.run.model", - "copilot.run.parent_id", - "copilot.run.provider", - 
"copilot.run.status", - "copilot.stop.appended_assistant", - "copilot.stop.blocks_count", - "copilot.stop.content_length", - "copilot.stop.outcome", - "copilot.stream", - "copilot.surface", - "copilot.table.id", - "copilot.table.outcome", - "copilot.table.row_count", - "copilot.table.source.content_bytes", - "copilot.table.source.format", - "copilot.table.source.path", - "copilot.trace.span_count", - "copilot.transport", - "copilot.user.message_preview", - "copilot.validate.outcome", - "copilot.vfs.file.extension", - "copilot.vfs.file.media_type", - "copilot.vfs.file.name", - "copilot.vfs.file.size_bytes", - "copilot.vfs.has_alpha", - "copilot.vfs.input.bytes", - "copilot.vfs.input.height", - "copilot.vfs.input.media_type_claimed", - "copilot.vfs.input.media_type_detected", - "copilot.vfs.input.width", - "copilot.vfs.metadata.failed", - "copilot.vfs.outcome", - "copilot.vfs.output.bytes", - "copilot.vfs.output.media_type", - "copilot.vfs.read.image.resized", - "copilot.vfs.read.outcome", - "copilot.vfs.read.output.bytes", - "copilot.vfs.read.output.lines", - "copilot.vfs.read.output.media_type", - "copilot.vfs.read.path", - "copilot.vfs.resize.attempts", - "copilot.vfs.resize.chosen_dimension", - "copilot.vfs.resize.chosen_quality", - "copilot.vfs.resized", - "copilot.vfs.sharp.load_failed", - "cost.default_cost", - "credential_set.id", - "credential_set.name", - "db.operation", - "db.sql.table", - "db.system", - "deployment.environment", - "deployment.version", - "document.file_size", - "document.mime_type", - "documents.count", - "documents.upload_type", - "error", - "error.code", - "error.internal", - "error.message", - "error.type", - "event.name", - "event.timestamp", - "execution.blocks_executed", - "execution.duration_ms", - "execution.error_message", - "execution.has_errors", - "execution.status", - "execution.total_cost", - "execution.trigger", - "function.execution_time_ms", - "function.id", - "function.name", - "gen_ai.agent.id", - "gen_ai.agent.name", - 
"gen_ai.cost.input", - "gen_ai.cost.output", - "gen_ai.cost.total", - "gen_ai.input.messages", - "gen_ai.operation.name", - "gen_ai.output.messages", - "gen_ai.request.assistant_messages", - "gen_ai.request.content_blocks", - "gen_ai.request.has_cache_control", - "gen_ai.request.image_blocks", - "gen_ai.request.image_data_bytes", - "gen_ai.request.max_message_blocks", - "gen_ai.request.messages.count", - "gen_ai.request.model", - "gen_ai.request.system_chars", - "gen_ai.request.text_blocks", - "gen_ai.request.tool_result_blocks", - "gen_ai.request.tool_use_blocks", - "gen_ai.request.tools.count", - "gen_ai.request.user_messages", - "gen_ai.system", - "gen_ai.tool.name", - "gen_ai.usage.cache_creation_tokens", - "gen_ai.usage.cache_read_tokens", - "gen_ai.usage.input_tokens", - "gen_ai.usage.output_tokens", - "gen_ai.usage.total_tokens", - "gen_ai.workflow.execution_id", - "gen_ai.workflow.id", - "gen_ai.workflow.name", - "hosted_key.env_var", - "http.host", - "http.method", - "http.path", - "http.remote_addr", - "http.request.content_length", - "http.response.body_bytes", - "http.response.content_length", - "http.response.headers_ms", - "http.response.total_ms", - "http.server.duration_ms", - "http.status_code", - "http.target", - "http.url", - "http.user_agent", - "invitation.role", - "knowledge_base.id", - "knowledge_base.name", - "llm.error_stage", - "llm.request.body_bytes", - "llm.stream.bytes", - "llm.stream.chunks", - "llm.stream.first_chunk_bytes", - "llm.stream.first_chunk_ms", - "llm.stream.open_ms", - "llm.stream.total_ms", - "lock.acquired", - "lock.backend", - "lock.timed_out", - "lock.timeout_ms", - "loop.id", - "loop.iterations", - "loop.name", - "mcp.execution_status", - "mcp.server_id", - "mcp.server_name", - "mcp.tool_name", - "mcp.transport", - "member.role", - "memory.content_bytes", - "memory.found", - "memory.path", - "memory.row_count", - "message.id", - "messaging.destination.name", - "messaging.system", - "model.duration_ms", - "model.id", 
- "model.name", - "mothership.origin", - "net.peer.name", - "oauth.provider", - "parallel.branches", - "parallel.id", - "parallel.name", - "prefs.tool_count", - "processing.chunk_size", - "processing.recipe", - "provider.id", - "rate_limit.attempt", - "rate_limit.count", - "rate_limit.delay_ms", - "rate_limit.limit", - "rate_limit.max_retries", - "rate_limit.outcome", - "rate_limit.retry_after_ms", - "request.go_trace_id", - "request.id", - "required.version", - "resume.request.body_bytes", - "resume.results.count", - "resume.results.data_bytes", - "resume.results.failure_count", - "resume.results.success_count", - "router.backend_name", - "router.bedrock_enabled", - "router.bedrock_supported_model", - "router.id", - "router.name", - "router.selected_backend", - "router.selected_path", - "run.id", - "search.results_count", - "service.instance.id", - "service.name", - "service.namespace", - "service.version", - "settle.completed", - "settle.timeout_ms", - "settle.wait_ms", - "sim.operation", - "sim.request_id", - "span.duration_ms", - "span.status", - "span.type", - "stream.id", - "subagent.id", - "subagent.outcome.content_bytes", - "subagent.outcome.error", - "subagent.outcome.structured_type", - "subagent.outcome.success", - "subagent.outcome.tool_call_count", - "task.age", - "task.decision_count", - "task.error_count", - "task.found", - "task.id", - "task.list_limit", - "task.rows", - "task.status", - "task.step_count", - "telemetry.sdk.language", - "telemetry.sdk.name", - "telemetry.sdk.version", - "template.id", - "template.name", - "throttle.reason", - "tool.args.bytes", - "tool.args.count", - "tool.args.preview", - "tool.async_waiter.poll_count", - "tool.async_waiter.pubsub_deliveries", - "tool.async_waiter.resolution", - "tool.call_id", - "tool.client_executable", - "tool.completion.received", - "tool.confirmation.status", - "tool.duration_ms", - "tool.error_kind", - "tool.executor", - "tool.external.service", - "tool.id", - "tool.name", - "tool.outcome", - 
"tool.outcome.message", - "tool.parent_span", - "tool.payload.bytes", - "tool.result.artifact", - "tool.result.bytes", - "tool.result.success", - "tool.scheduled", - "tool.status", - "tool.status_code", - "tool.store_status", - "tool.sync", - "tool.timeout_ms", - "trace.aborted", - "trace.billed_total_cost", - "trace.cache_read_tokens", - "trace.cache_write_tokens", - "trace.duration_ms", - "trace.error", - "trace.go_id", - "trace.input_tokens", - "trace.model", - "trace.outcome", - "trace.output_tokens", - "trace.provider", - "trace.raw_total_cost", - "trace.span_count", - "trace.tool_call_count", - "user.auth_method", - "user.auth_provider", - "user.id", - "webhook.id", - "webhook.provider", - "webhook.trigger_success", - "workflow.block_types", - "workflow.blocks_count", - "workflow.created_id", - "workflow.duration_ms", - "workflow.edges_count", - "workflow.execution_id", - "workflow.has_folder", - "workflow.has_workspace", - "workflow.id", - "workflow.loops_count", - "workflow.name", - "workflow.new_id", - "workflow.parallels_count", - "workflow.source_id", - "workflow.trigger", - "workspace.id", - "workspace.name", -] as const; + 'abort.backend', + 'abort.found', + 'abort.redis_result', + 'analytics.aborted', + 'analytics.billed_total_cost', + 'analytics.cache_read_tokens', + 'analytics.cache_write_tokens', + 'analytics.customer_type', + 'analytics.duration_ms', + 'analytics.error', + 'analytics.input_tokens', + 'analytics.model', + 'analytics.output_tokens', + 'analytics.provider', + 'analytics.source', + 'analytics.tool_call_count', + 'api_key.id', + 'api_key.name', + 'auth.incoming_internal', + 'auth.key.match', + 'auth.key.preview', + 'auth.key.source', + 'auth.key.type', + 'auth.provider', + 'auth.validate.status_code', + 'aws.region', + 'bedrock.error_code', + 'bedrock.model_id', + 'bedrock.request.body_bytes_retry', + 'billing.attempts', + 'billing.change_type', + 'billing.cost.input_usd', + 'billing.cost.output_usd', + 'billing.cost.total_usd', + 
'billing.cost_usd', + 'billing.customer_type', + 'billing.duplicate', + 'billing.duration_ms', + 'billing.has_idempotency_key', + 'billing.idempotency_key', + 'billing.interval', + 'billing.is_mcp', + 'billing.llm_cost', + 'billing.new_plan', + 'billing.outcome', + 'billing.plan', + 'billing.previous_plan', + 'billing.service_charges', + 'billing.source', + 'billing.total_cost', + 'billing.usage.current', + 'billing.usage.exceeded', + 'billing.usage.limit', + 'block.id', + 'block.name', + 'block.type', + 'chat.active_messages_bytes', + 'chat.active_messages_count', + 'chat.append_bytes', + 'chat.append_count', + 'chat.artifact_keys', + 'chat.artifacts_bytes', + 'chat.auth_type', + 'chat.context_count', + 'chat.context_usage', + 'chat.continuation.messages_before', + 'chat.continuation.tool_result_bytes', + 'chat.continuation.tool_result_failure', + 'chat.continuation.tool_result_success', + 'chat.continuation.tool_results', + 'chat.continuation.total_tool_calls', + 'chat.existing_message_count', + 'chat.file_attachment_count', + 'chat.finalize.outcome', + 'chat.found', + 'chat.has_assistant_message', + 'chat.has_output_configs', + 'chat.id', + 'chat.message_bytes', + 'chat.messages_after', + 'chat.messages_bytes', + 'chat.messages_count', + 'chat.persist.outcome', + 'chat.preexisting', + 'chat.rollback_index', + 'chat.tokens_used', + 'chat.type', + 'chat.user_message_id', + 'checkpoint.age', + 'checkpoint.attempts_bytes', + 'checkpoint.bytes.assistant_tool_use', + 'checkpoint.bytes.current_messages', + 'checkpoint.bytes.immediate_results', + 'checkpoint.bytes.pending_tool_calls', + 'checkpoint.bytes.provider_request', + 'checkpoint.bytes.request_context', + 'checkpoint.bytes.tool_usage', + 'checkpoint.cached_credentials_bytes', + 'checkpoint.claimed', + 'checkpoint.claimed_now', + 'checkpoint.completed_bytes', + 'checkpoint.completed_steps', + 'checkpoint.current_messages', + 'checkpoint.decisions_bytes', + 'checkpoint.found', + 'checkpoint.frames', + 
'checkpoint.id', + 'checkpoint.immediate_results', + 'checkpoint.message_id', + 'checkpoint.pending_bytes', + 'checkpoint.pending_steps', + 'checkpoint.pending_tool_count', + 'checkpoint.rows', + 'checkpoint.task_id', + 'checkpoint.total_tool_calls', + 'checkpoint.workflow_snapshot_bytes', + 'client.version', + 'condition.id', + 'condition.name', + 'condition.result', + 'context.reduce.budget_chars', + 'context.reduce.caller', + 'context.reduce.did_reduce', + 'context.reduce.input_chars', + 'context.reduce.input_messages', + 'context.reduce.outcome', + 'context.reduce.output_chars', + 'context.reduce.output_messages', + 'context.reduced', + 'context.summarize.input_chars', + 'context.summarize.output_chars', + 'copilot.abort.controller_fired', + 'copilot.abort.go_marker_ok', + 'copilot.abort.local_aborted', + 'copilot.abort.marker_written', + 'copilot.abort.outcome', + 'copilot.abort.unknown_reason', + 'copilot.async_tool.claimed_by', + 'copilot.async_tool.has_error', + 'copilot.async_tool.ids_count', + 'copilot.async_tool.status', + 'copilot.async_tool.worker_id', + 'copilot.branch.kind', + 'copilot.chat.is_new', + 'copilot.commands.count', + 'copilot.confirm.outcome', + 'copilot.contexts.count', + 'copilot.file_attachments.count', + 'copilot.finalize.outcome', + 'copilot.interrupted_prior_stream', + 'copilot.leg', + 'copilot.mode', + 'copilot.operation', + 'copilot.output_file.bytes', + 'copilot.output_file.format', + 'copilot.output_file.id', + 'copilot.output_file.name', + 'copilot.output_file.outcome', + 'copilot.pending_stream.wait_ms', + 'copilot.prefetch', + 'copilot.publisher.client_disconnected', + 'copilot.publisher.saw_complete', + 'copilot.recovery.latest_seq', + 'copilot.recovery.oldest_seq', + 'copilot.recovery.outcome', + 'copilot.recovery.requested_after_seq', + 'copilot.request.cancel_reason', + 'copilot.request.outcome', + 'copilot.resource_attachments.count', + 'copilot.resources.aborted', + 'copilot.resources.op', + 
'copilot.resources.removed_count', + 'copilot.resources.upserted_count', + 'copilot.result.content_blocks', + 'copilot.result.content_length', + 'copilot.result.tool_calls', + 'copilot.resume.after_cursor', + 'copilot.resume.duration_ms', + 'copilot.resume.event_count', + 'copilot.resume.outcome', + 'copilot.resume.poll_iterations', + 'copilot.resume.preview_session_count', + 'copilot.route', + 'copilot.run.agent', + 'copilot.run.has_completed_at', + 'copilot.run.has_error', + 'copilot.run.model', + 'copilot.run.parent_id', + 'copilot.run.provider', + 'copilot.run.status', + 'copilot.stop.appended_assistant', + 'copilot.stop.blocks_count', + 'copilot.stop.content_length', + 'copilot.stop.outcome', + 'copilot.stream', + 'copilot.surface', + 'copilot.table.id', + 'copilot.table.outcome', + 'copilot.table.row_count', + 'copilot.table.source.content_bytes', + 'copilot.table.source.format', + 'copilot.table.source.path', + 'copilot.trace.span_count', + 'copilot.transport', + 'copilot.user.message_preview', + 'copilot.validate.outcome', + 'copilot.vfs.file.extension', + 'copilot.vfs.file.media_type', + 'copilot.vfs.file.name', + 'copilot.vfs.file.size_bytes', + 'copilot.vfs.has_alpha', + 'copilot.vfs.input.bytes', + 'copilot.vfs.input.height', + 'copilot.vfs.input.media_type_claimed', + 'copilot.vfs.input.media_type_detected', + 'copilot.vfs.input.width', + 'copilot.vfs.metadata.failed', + 'copilot.vfs.outcome', + 'copilot.vfs.output.bytes', + 'copilot.vfs.output.media_type', + 'copilot.vfs.read.image.resized', + 'copilot.vfs.read.outcome', + 'copilot.vfs.read.output.bytes', + 'copilot.vfs.read.output.lines', + 'copilot.vfs.read.output.media_type', + 'copilot.vfs.read.path', + 'copilot.vfs.resize.attempts', + 'copilot.vfs.resize.chosen_dimension', + 'copilot.vfs.resize.chosen_quality', + 'copilot.vfs.resized', + 'copilot.vfs.sharp.load_failed', + 'cost.default_cost', + 'credential_set.id', + 'credential_set.name', + 'db.operation', + 'db.sql.table', + 'db.system', + 
'deployment.environment', + 'deployment.version', + 'document.file_size', + 'document.mime_type', + 'documents.count', + 'documents.upload_type', + 'error', + 'error.code', + 'error.internal', + 'error.message', + 'error.type', + 'event.name', + 'event.timestamp', + 'execution.blocks_executed', + 'execution.duration_ms', + 'execution.error_message', + 'execution.has_errors', + 'execution.status', + 'execution.total_cost', + 'execution.trigger', + 'function.execution_time_ms', + 'function.id', + 'function.name', + 'gen_ai.agent.id', + 'gen_ai.agent.name', + 'gen_ai.cost.input', + 'gen_ai.cost.output', + 'gen_ai.cost.total', + 'gen_ai.input.messages', + 'gen_ai.operation.name', + 'gen_ai.output.messages', + 'gen_ai.request.assistant_messages', + 'gen_ai.request.content_blocks', + 'gen_ai.request.has_cache_control', + 'gen_ai.request.image_blocks', + 'gen_ai.request.image_data_bytes', + 'gen_ai.request.max_message_blocks', + 'gen_ai.request.messages.count', + 'gen_ai.request.model', + 'gen_ai.request.system_chars', + 'gen_ai.request.text_blocks', + 'gen_ai.request.tool_result_blocks', + 'gen_ai.request.tool_use_blocks', + 'gen_ai.request.tools.count', + 'gen_ai.request.user_messages', + 'gen_ai.system', + 'gen_ai.tool.name', + 'gen_ai.usage.cache_creation_tokens', + 'gen_ai.usage.cache_read_tokens', + 'gen_ai.usage.input_tokens', + 'gen_ai.usage.output_tokens', + 'gen_ai.usage.total_tokens', + 'gen_ai.workflow.execution_id', + 'gen_ai.workflow.id', + 'gen_ai.workflow.name', + 'hosted_key.env_var', + 'http.host', + 'http.method', + 'http.path', + 'http.remote_addr', + 'http.request.content_length', + 'http.response.body_bytes', + 'http.response.content_length', + 'http.response.headers_ms', + 'http.response.total_ms', + 'http.server.duration_ms', + 'http.status_code', + 'http.target', + 'http.url', + 'http.user_agent', + 'invitation.role', + 'knowledge_base.id', + 'knowledge_base.name', + 'llm.error_stage', + 'llm.request.body_bytes', + 'llm.stream.bytes', + 
'llm.stream.chunks', + 'llm.stream.first_chunk_bytes', + 'llm.stream.first_chunk_ms', + 'llm.stream.open_ms', + 'llm.stream.total_ms', + 'lock.acquired', + 'lock.backend', + 'lock.timed_out', + 'lock.timeout_ms', + 'loop.id', + 'loop.iterations', + 'loop.name', + 'mcp.execution_status', + 'mcp.server_id', + 'mcp.server_name', + 'mcp.tool_name', + 'mcp.transport', + 'member.role', + 'memory.content_bytes', + 'memory.found', + 'memory.path', + 'memory.row_count', + 'message.id', + 'messaging.destination.name', + 'messaging.system', + 'model.duration_ms', + 'model.id', + 'model.name', + 'mothership.origin', + 'net.peer.name', + 'oauth.provider', + 'parallel.branches', + 'parallel.id', + 'parallel.name', + 'prefs.tool_count', + 'processing.chunk_size', + 'processing.recipe', + 'provider.id', + 'rate_limit.attempt', + 'rate_limit.count', + 'rate_limit.delay_ms', + 'rate_limit.limit', + 'rate_limit.max_retries', + 'rate_limit.outcome', + 'rate_limit.retry_after_ms', + 'request.go_trace_id', + 'request.id', + 'required.version', + 'resume.request.body_bytes', + 'resume.results.count', + 'resume.results.data_bytes', + 'resume.results.failure_count', + 'resume.results.success_count', + 'router.backend_name', + 'router.bedrock_enabled', + 'router.bedrock_supported_model', + 'router.id', + 'router.name', + 'router.selected_backend', + 'router.selected_path', + 'run.id', + 'search.results_count', + 'service.instance.id', + 'service.name', + 'service.namespace', + 'service.version', + 'settle.completed', + 'settle.timeout_ms', + 'settle.wait_ms', + 'sim.operation', + 'sim.request_id', + 'span.duration_ms', + 'span.status', + 'span.type', + 'stream.id', + 'subagent.id', + 'subagent.outcome.content_bytes', + 'subagent.outcome.error', + 'subagent.outcome.structured_type', + 'subagent.outcome.success', + 'subagent.outcome.tool_call_count', + 'task.age', + 'task.decision_count', + 'task.error_count', + 'task.found', + 'task.id', + 'task.list_limit', + 'task.rows', + 'task.status', + 
'task.step_count', + 'telemetry.sdk.language', + 'telemetry.sdk.name', + 'telemetry.sdk.version', + 'template.id', + 'template.name', + 'throttle.reason', + 'tool.args.bytes', + 'tool.args.count', + 'tool.args.preview', + 'tool.async_waiter.poll_count', + 'tool.async_waiter.pubsub_deliveries', + 'tool.async_waiter.resolution', + 'tool.call_id', + 'tool.client_executable', + 'tool.completion.received', + 'tool.confirmation.status', + 'tool.duration_ms', + 'tool.error_kind', + 'tool.executor', + 'tool.external.service', + 'tool.id', + 'tool.name', + 'tool.outcome', + 'tool.outcome.message', + 'tool.parent_span', + 'tool.payload.bytes', + 'tool.result.artifact', + 'tool.result.bytes', + 'tool.result.success', + 'tool.scheduled', + 'tool.status', + 'tool.status_code', + 'tool.store_status', + 'tool.sync', + 'tool.timeout_ms', + 'trace.aborted', + 'trace.billed_total_cost', + 'trace.cache_read_tokens', + 'trace.cache_write_tokens', + 'trace.duration_ms', + 'trace.error', + 'trace.go_id', + 'trace.input_tokens', + 'trace.model', + 'trace.outcome', + 'trace.output_tokens', + 'trace.provider', + 'trace.raw_total_cost', + 'trace.span_count', + 'trace.tool_call_count', + 'user.auth_method', + 'user.auth_provider', + 'user.id', + 'webhook.id', + 'webhook.provider', + 'webhook.trigger_success', + 'workflow.block_types', + 'workflow.blocks_count', + 'workflow.created_id', + 'workflow.duration_ms', + 'workflow.edges_count', + 'workflow.execution_id', + 'workflow.has_folder', + 'workflow.has_workspace', + 'workflow.id', + 'workflow.loops_count', + 'workflow.name', + 'workflow.new_id', + 'workflow.parallels_count', + 'workflow.source_id', + 'workflow.trigger', + 'workspace.id', + 'workspace.name', +] as const diff --git a/apps/sim/lib/copilot/generated/trace-events-v1.ts b/apps/sim/lib/copilot/generated/trace-events-v1.ts index 2a23d7e3f38..056ccbe2946 100644 --- a/apps/sim/lib/copilot/generated/trace-events-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-events-v1.ts @@ -10,35 
+10,35 @@ // become compile errors. export const TraceEvent = { - BedrockInvokeRetryWithoutImages: "bedrock.invoke.retry_without_images", - CopilotOutputFileError: "copilot.output_file.error", - CopilotTableError: "copilot.table.error", - CopilotVfsParseFailed: "copilot.vfs.parse_failed", - CopilotVfsResizeAttempt: "copilot.vfs.resize_attempt", - CopilotVfsResizeAttemptFailed: "copilot.vfs.resize_attempt_failed", - LlmInvokeSent: "llm.invoke.sent", - LlmStreamFirstChunk: "llm.stream.first_chunk", - LlmStreamOpened: "llm.stream.opened", - PgNotifyFailed: "pg_notify_failed", - RedisSubscribed: "redis.subscribed", - RequestCancelled: "request.cancelled", -} as const; + BedrockInvokeRetryWithoutImages: 'bedrock.invoke.retry_without_images', + CopilotOutputFileError: 'copilot.output_file.error', + CopilotTableError: 'copilot.table.error', + CopilotVfsParseFailed: 'copilot.vfs.parse_failed', + CopilotVfsResizeAttempt: 'copilot.vfs.resize_attempt', + CopilotVfsResizeAttemptFailed: 'copilot.vfs.resize_attempt_failed', + LlmInvokeSent: 'llm.invoke.sent', + LlmStreamFirstChunk: 'llm.stream.first_chunk', + LlmStreamOpened: 'llm.stream.opened', + PgNotifyFailed: 'pg_notify_failed', + RedisSubscribed: 'redis.subscribed', + RequestCancelled: 'request.cancelled', +} as const -export type TraceEventKey = keyof typeof TraceEvent; -export type TraceEventValue = (typeof TraceEvent)[TraceEventKey]; +export type TraceEventKey = keyof typeof TraceEvent +export type TraceEventValue = (typeof TraceEvent)[TraceEventKey] /** Readonly sorted list of every canonical event name. 
*/ export const TraceEventValues: readonly TraceEventValue[] = [ - "bedrock.invoke.retry_without_images", - "copilot.output_file.error", - "copilot.table.error", - "copilot.vfs.parse_failed", - "copilot.vfs.resize_attempt", - "copilot.vfs.resize_attempt_failed", - "llm.invoke.sent", - "llm.stream.first_chunk", - "llm.stream.opened", - "pg_notify_failed", - "redis.subscribed", - "request.cancelled", -] as const; + 'bedrock.invoke.retry_without_images', + 'copilot.output_file.error', + 'copilot.table.error', + 'copilot.vfs.parse_failed', + 'copilot.vfs.resize_attempt', + 'copilot.vfs.resize_attempt_failed', + 'llm.invoke.sent', + 'llm.stream.first_chunk', + 'llm.stream.opened', + 'pg_notify_failed', + 'redis.subscribed', + 'request.cancelled', +] as const diff --git a/apps/sim/lib/copilot/generated/trace-spans-v1.ts b/apps/sim/lib/copilot/generated/trace-spans-v1.ts index 7c767e7735c..1c97f0dd009 100644 --- a/apps/sim/lib/copilot/generated/trace-spans-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-spans-v1.ts @@ -9,143 +9,143 @@ // single source of truth and typos become compile errors. 
export const TraceSpan = { - AnthropicCountTokens: "anthropic.count_tokens", - AsyncToolStoreSet: "async_tool_store.set", - AuthRateLimitRecord: "auth.rate_limit.record", - AuthValidateKey: "auth.validate_key", - ChatContinueWithToolResults: "chat.continue_with_tool_results", - ChatExplicitAbortConsume: "chat.explicit_abort.consume", - ChatExplicitAbortFlushPausedBilling: "chat.explicit_abort.flush_paused_billing", - ChatExplicitAbortHandle: "chat.explicit_abort.handle", - ChatExplicitAbortMark: "chat.explicit_abort.mark", - ChatExplicitAbortPeek: "chat.explicit_abort.peek", - ChatGateAcquire: "chat.gate.acquire", - ChatPersistAfterDone: "chat.persist_after_done", - ChatSetup: "chat.setup", - ContextReduce: "context.reduce", - ContextSummarizeChunk: "context.summarize_chunk", - CopilotAnalyticsFlush: "copilot.analytics.flush", - CopilotAnalyticsSaveRequest: "copilot.analytics.save_request", - CopilotAnalyticsUpdateBilling: "copilot.analytics.update_billing", - CopilotAsyncRunsClaimCompleted: "copilot.async_runs.claim_completed", - CopilotAsyncRunsCreateRunCheckpoint: "copilot.async_runs.create_run_checkpoint", - CopilotAsyncRunsCreateRunSegment: "copilot.async_runs.create_run_segment", - CopilotAsyncRunsGetAsyncToolCall: "copilot.async_runs.get_async_tool_call", - CopilotAsyncRunsGetLatestForExecution: "copilot.async_runs.get_latest_for_execution", - CopilotAsyncRunsGetLatestForStream: "copilot.async_runs.get_latest_for_stream", - CopilotAsyncRunsGetMany: "copilot.async_runs.get_many", - CopilotAsyncRunsGetRunSegment: "copilot.async_runs.get_run_segment", - CopilotAsyncRunsListForRun: "copilot.async_runs.list_for_run", - CopilotAsyncRunsMarkAsyncToolStatus: "copilot.async_runs.mark_async_tool_status", - CopilotAsyncRunsReleaseClaim: "copilot.async_runs.release_claim", - CopilotAsyncRunsUpdateRunStatus: "copilot.async_runs.update_run_status", - CopilotAsyncRunsUpsertAsyncToolCall: "copilot.async_runs.upsert_async_tool_call", - CopilotAuthValidateApiKey: 
"copilot.auth.validate_api_key", - CopilotBillingUpdateCost: "copilot.billing.update_cost", - CopilotChatAbortActiveStream: "copilot.chat.abort_active_stream", - CopilotChatAbortStream: "copilot.chat.abort_stream", - CopilotChatAbortWaitSettle: "copilot.chat.abort_wait_settle", - CopilotChatAcquirePendingStreamLock: "copilot.chat.acquire_pending_stream_lock", - CopilotChatBuildExecutionContext: "copilot.chat.build_execution_context", - CopilotChatBuildPayload: "copilot.chat.build_payload", - CopilotChatBuildWorkspaceContext: "copilot.chat.build_workspace_context", - CopilotChatFinalizeAssistantTurn: "copilot.chat.finalize_assistant_turn", - CopilotChatPersistUserMessage: "copilot.chat.persist_user_message", - CopilotChatResolveAgentContexts: "copilot.chat.resolve_agent_contexts", - CopilotChatResolveBranch: "copilot.chat.resolve_branch", - CopilotChatResolveOrCreateChat: "copilot.chat.resolve_or_create_chat", - CopilotChatStopStream: "copilot.chat.stop_stream", - CopilotConfirmToolResult: "copilot.confirm.tool_result", - CopilotFinalizeStream: "copilot.finalize_stream", - CopilotRecoveryCheckReplayGap: "copilot.recovery.check_replay_gap", - CopilotResumeRequest: "copilot.resume.request", - CopilotSubagentExecute: "copilot.subagent.execute", - CopilotToolWaitForClientResult: "copilot.tool.wait_for_client_result", - CopilotToolsHandleResourceSideEffects: "copilot.tools.handle_resource_side_effects", - CopilotToolsWriteCsvToTable: "copilot.tools.write_csv_to_table", - CopilotToolsWriteOutputFile: "copilot.tools.write_output_file", - CopilotToolsWriteOutputTable: "copilot.tools.write_output_table", - CopilotVfsPrepareImage: "copilot.vfs.prepare_image", - CopilotVfsReadFile: "copilot.vfs.read_file", - GenAiAgentExecute: "gen_ai.agent.execute", - LlmStream: "llm.stream", - ProviderRouterCountTokens: "provider.router.count_tokens", - ProviderRouterRoute: "provider.router.route", - SimUpdateCost: "sim.update_cost", - SimValidateApiKey: "sim.validate_api_key", - 
ToolAsyncWaiterWait: "tool.async_waiter.wait", - ToolExecute: "tool.execute", -} as const; + AnthropicCountTokens: 'anthropic.count_tokens', + AsyncToolStoreSet: 'async_tool_store.set', + AuthRateLimitRecord: 'auth.rate_limit.record', + AuthValidateKey: 'auth.validate_key', + ChatContinueWithToolResults: 'chat.continue_with_tool_results', + ChatExplicitAbortConsume: 'chat.explicit_abort.consume', + ChatExplicitAbortFlushPausedBilling: 'chat.explicit_abort.flush_paused_billing', + ChatExplicitAbortHandle: 'chat.explicit_abort.handle', + ChatExplicitAbortMark: 'chat.explicit_abort.mark', + ChatExplicitAbortPeek: 'chat.explicit_abort.peek', + ChatGateAcquire: 'chat.gate.acquire', + ChatPersistAfterDone: 'chat.persist_after_done', + ChatSetup: 'chat.setup', + ContextReduce: 'context.reduce', + ContextSummarizeChunk: 'context.summarize_chunk', + CopilotAnalyticsFlush: 'copilot.analytics.flush', + CopilotAnalyticsSaveRequest: 'copilot.analytics.save_request', + CopilotAnalyticsUpdateBilling: 'copilot.analytics.update_billing', + CopilotAsyncRunsClaimCompleted: 'copilot.async_runs.claim_completed', + CopilotAsyncRunsCreateRunCheckpoint: 'copilot.async_runs.create_run_checkpoint', + CopilotAsyncRunsCreateRunSegment: 'copilot.async_runs.create_run_segment', + CopilotAsyncRunsGetAsyncToolCall: 'copilot.async_runs.get_async_tool_call', + CopilotAsyncRunsGetLatestForExecution: 'copilot.async_runs.get_latest_for_execution', + CopilotAsyncRunsGetLatestForStream: 'copilot.async_runs.get_latest_for_stream', + CopilotAsyncRunsGetMany: 'copilot.async_runs.get_many', + CopilotAsyncRunsGetRunSegment: 'copilot.async_runs.get_run_segment', + CopilotAsyncRunsListForRun: 'copilot.async_runs.list_for_run', + CopilotAsyncRunsMarkAsyncToolStatus: 'copilot.async_runs.mark_async_tool_status', + CopilotAsyncRunsReleaseClaim: 'copilot.async_runs.release_claim', + CopilotAsyncRunsUpdateRunStatus: 'copilot.async_runs.update_run_status', + CopilotAsyncRunsUpsertAsyncToolCall: 
'copilot.async_runs.upsert_async_tool_call', + CopilotAuthValidateApiKey: 'copilot.auth.validate_api_key', + CopilotBillingUpdateCost: 'copilot.billing.update_cost', + CopilotChatAbortActiveStream: 'copilot.chat.abort_active_stream', + CopilotChatAbortStream: 'copilot.chat.abort_stream', + CopilotChatAbortWaitSettle: 'copilot.chat.abort_wait_settle', + CopilotChatAcquirePendingStreamLock: 'copilot.chat.acquire_pending_stream_lock', + CopilotChatBuildExecutionContext: 'copilot.chat.build_execution_context', + CopilotChatBuildPayload: 'copilot.chat.build_payload', + CopilotChatBuildWorkspaceContext: 'copilot.chat.build_workspace_context', + CopilotChatFinalizeAssistantTurn: 'copilot.chat.finalize_assistant_turn', + CopilotChatPersistUserMessage: 'copilot.chat.persist_user_message', + CopilotChatResolveAgentContexts: 'copilot.chat.resolve_agent_contexts', + CopilotChatResolveBranch: 'copilot.chat.resolve_branch', + CopilotChatResolveOrCreateChat: 'copilot.chat.resolve_or_create_chat', + CopilotChatStopStream: 'copilot.chat.stop_stream', + CopilotConfirmToolResult: 'copilot.confirm.tool_result', + CopilotFinalizeStream: 'copilot.finalize_stream', + CopilotRecoveryCheckReplayGap: 'copilot.recovery.check_replay_gap', + CopilotResumeRequest: 'copilot.resume.request', + CopilotSubagentExecute: 'copilot.subagent.execute', + CopilotToolWaitForClientResult: 'copilot.tool.wait_for_client_result', + CopilotToolsHandleResourceSideEffects: 'copilot.tools.handle_resource_side_effects', + CopilotToolsWriteCsvToTable: 'copilot.tools.write_csv_to_table', + CopilotToolsWriteOutputFile: 'copilot.tools.write_output_file', + CopilotToolsWriteOutputTable: 'copilot.tools.write_output_table', + CopilotVfsPrepareImage: 'copilot.vfs.prepare_image', + CopilotVfsReadFile: 'copilot.vfs.read_file', + GenAiAgentExecute: 'gen_ai.agent.execute', + LlmStream: 'llm.stream', + ProviderRouterCountTokens: 'provider.router.count_tokens', + ProviderRouterRoute: 'provider.router.route', + SimUpdateCost: 
'sim.update_cost', + SimValidateApiKey: 'sim.validate_api_key', + ToolAsyncWaiterWait: 'tool.async_waiter.wait', + ToolExecute: 'tool.execute', +} as const -export type TraceSpanKey = keyof typeof TraceSpan; -export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey]; +export type TraceSpanKey = keyof typeof TraceSpan +export type TraceSpanValue = (typeof TraceSpan)[TraceSpanKey] /** Readonly sorted list of every canonical span name. */ export const TraceSpanValues: readonly TraceSpanValue[] = [ - "anthropic.count_tokens", - "async_tool_store.set", - "auth.rate_limit.record", - "auth.validate_key", - "chat.continue_with_tool_results", - "chat.explicit_abort.consume", - "chat.explicit_abort.flush_paused_billing", - "chat.explicit_abort.handle", - "chat.explicit_abort.mark", - "chat.explicit_abort.peek", - "chat.gate.acquire", - "chat.persist_after_done", - "chat.setup", - "context.reduce", - "context.summarize_chunk", - "copilot.analytics.flush", - "copilot.analytics.save_request", - "copilot.analytics.update_billing", - "copilot.async_runs.claim_completed", - "copilot.async_runs.create_run_checkpoint", - "copilot.async_runs.create_run_segment", - "copilot.async_runs.get_async_tool_call", - "copilot.async_runs.get_latest_for_execution", - "copilot.async_runs.get_latest_for_stream", - "copilot.async_runs.get_many", - "copilot.async_runs.get_run_segment", - "copilot.async_runs.list_for_run", - "copilot.async_runs.mark_async_tool_status", - "copilot.async_runs.release_claim", - "copilot.async_runs.update_run_status", - "copilot.async_runs.upsert_async_tool_call", - "copilot.auth.validate_api_key", - "copilot.billing.update_cost", - "copilot.chat.abort_active_stream", - "copilot.chat.abort_stream", - "copilot.chat.abort_wait_settle", - "copilot.chat.acquire_pending_stream_lock", - "copilot.chat.build_execution_context", - "copilot.chat.build_payload", - "copilot.chat.build_workspace_context", - "copilot.chat.finalize_assistant_turn", - 
"copilot.chat.persist_user_message", - "copilot.chat.resolve_agent_contexts", - "copilot.chat.resolve_branch", - "copilot.chat.resolve_or_create_chat", - "copilot.chat.stop_stream", - "copilot.confirm.tool_result", - "copilot.finalize_stream", - "copilot.recovery.check_replay_gap", - "copilot.resume.request", - "copilot.subagent.execute", - "copilot.tool.wait_for_client_result", - "copilot.tools.handle_resource_side_effects", - "copilot.tools.write_csv_to_table", - "copilot.tools.write_output_file", - "copilot.tools.write_output_table", - "copilot.vfs.prepare_image", - "copilot.vfs.read_file", - "gen_ai.agent.execute", - "llm.stream", - "provider.router.count_tokens", - "provider.router.route", - "sim.update_cost", - "sim.validate_api_key", - "tool.async_waiter.wait", - "tool.execute", -] as const; + 'anthropic.count_tokens', + 'async_tool_store.set', + 'auth.rate_limit.record', + 'auth.validate_key', + 'chat.continue_with_tool_results', + 'chat.explicit_abort.consume', + 'chat.explicit_abort.flush_paused_billing', + 'chat.explicit_abort.handle', + 'chat.explicit_abort.mark', + 'chat.explicit_abort.peek', + 'chat.gate.acquire', + 'chat.persist_after_done', + 'chat.setup', + 'context.reduce', + 'context.summarize_chunk', + 'copilot.analytics.flush', + 'copilot.analytics.save_request', + 'copilot.analytics.update_billing', + 'copilot.async_runs.claim_completed', + 'copilot.async_runs.create_run_checkpoint', + 'copilot.async_runs.create_run_segment', + 'copilot.async_runs.get_async_tool_call', + 'copilot.async_runs.get_latest_for_execution', + 'copilot.async_runs.get_latest_for_stream', + 'copilot.async_runs.get_many', + 'copilot.async_runs.get_run_segment', + 'copilot.async_runs.list_for_run', + 'copilot.async_runs.mark_async_tool_status', + 'copilot.async_runs.release_claim', + 'copilot.async_runs.update_run_status', + 'copilot.async_runs.upsert_async_tool_call', + 'copilot.auth.validate_api_key', + 'copilot.billing.update_cost', + 
'copilot.chat.abort_active_stream', + 'copilot.chat.abort_stream', + 'copilot.chat.abort_wait_settle', + 'copilot.chat.acquire_pending_stream_lock', + 'copilot.chat.build_execution_context', + 'copilot.chat.build_payload', + 'copilot.chat.build_workspace_context', + 'copilot.chat.finalize_assistant_turn', + 'copilot.chat.persist_user_message', + 'copilot.chat.resolve_agent_contexts', + 'copilot.chat.resolve_branch', + 'copilot.chat.resolve_or_create_chat', + 'copilot.chat.stop_stream', + 'copilot.confirm.tool_result', + 'copilot.finalize_stream', + 'copilot.recovery.check_replay_gap', + 'copilot.resume.request', + 'copilot.subagent.execute', + 'copilot.tool.wait_for_client_result', + 'copilot.tools.handle_resource_side_effects', + 'copilot.tools.write_csv_to_table', + 'copilot.tools.write_output_file', + 'copilot.tools.write_output_table', + 'copilot.vfs.prepare_image', + 'copilot.vfs.read_file', + 'gen_ai.agent.execute', + 'llm.stream', + 'provider.router.count_tokens', + 'provider.router.route', + 'sim.update_cost', + 'sim.validate_api_key', + 'tool.async_waiter.wait', + 'tool.execute', +] as const diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index 7e92583ab58..529b05d3a84 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -1,7 +1,7 @@ import { type Context, context, SpanStatusCode, trace } from '@opentelemetry/api' +import { CopilotLeg } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { traceHeaders } from '@/lib/copilot/request/go/propagation' -import { CopilotLeg } from '@/lib/copilot/generated/trace-attribute-values-v1' // Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated // before `instrumentation-node.ts` installs the TracerProvider under diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.ts 
b/apps/sim/lib/copilot/request/lifecycle/headless.ts index fd239396808..2196f8b108a 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.ts +++ b/apps/sim/lib/copilot/request/lifecycle/headless.ts @@ -74,6 +74,11 @@ export async function runHeadlessCopilotLifecycle( ) try { + // Best-effort extraction of the prompt from the untyped + // headless payload. Keeps parity with the streaming path + // where `message` is destructured directly. + const userMessage = + typeof requestPayload.message === 'string' ? requestPayload.message : undefined await reportTrace( trace.build({ outcome, @@ -81,6 +86,7 @@ export async function runHeadlessCopilotLifecycle( chatId: result?.chatId ?? options.chatId, runId: options.runId, executionId: options.executionId, + userMessage, usage: result?.usage, cost: result?.cost, }), diff --git a/apps/sim/lib/copilot/request/lifecycle/run.ts b/apps/sim/lib/copilot/request/lifecycle/run.ts index 65149c406b1..ae504a6e596 100644 --- a/apps/sim/lib/copilot/request/lifecycle/run.ts +++ b/apps/sim/lib/copilot/request/lifecycle/run.ts @@ -250,7 +250,13 @@ async function runCheckpointLoop( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), 'X-Client-Version': SIM_AGENT_VERSION, - ...(options.simRequestId ? { 'X-Sim-Request-ID': options.simRequestId } : {}), + // X-Sim-Request-ID header removed: Sim's logical request ID + // is now the OTel trace ID of the chat POST's root span, + // propagated to Go via the W3C `traceparent` header + // (injected by fetchGo below). Go's `RequestIdentity` picks + // the trace ID up from the extracted context automatically + // when no Sim-Request-ID header is present. Go keeps the + // reader around for back-compat with older Sim deploys. 
}, body: JSON.stringify(payload), }, diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index a7b73246351..40903a20fb4 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -26,8 +26,8 @@ import { type CopilotLifecycleOutcome, startCopilotOtelRoot } from '@/lib/copilo import { cleanupAbortMarker, clearFilePreviewSessions, - registerActiveStream, isExplicitStopReason, + registerActiveStream, releasePendingChatStream, resetBuffer, StreamWriter, @@ -173,10 +173,7 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS chatId, reason: serializedReason, }) - activeOtelRoot.span.setAttribute( - TraceAttr.CopilotAbortUnknownReason, - serializedReason - ) + activeOtelRoot.span.setAttribute(TraceAttr.CopilotAbortUnknownReason, serializedReason) } activeOtelRoot.span.setAttribute(TraceAttr.CopilotRequestCancelReason, cancelReason) activeOtelRoot.span.addEvent(TraceEvent.RequestCancelled, { @@ -308,11 +305,8 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS // Error-path classification: if the abort signal fired or // the client disconnected, treat the thrown error as a // cancel (same rationale as the try-path above). - const wasCancelled = - abortController.signal.aborted || publisher.clientDisconnected - outcome = wasCancelled - ? RequestTraceV1Outcome.cancelled - : RequestTraceV1Outcome.error + const wasCancelled = abortController.signal.aborted || publisher.clientDisconnected + outcome = wasCancelled ? RequestTraceV1Outcome.cancelled : RequestTraceV1Outcome.error if (outcome === RequestTraceV1Outcome.cancelled) { recordCancelled(error instanceof Error ? 
error.message : String(error)) } @@ -364,6 +358,12 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS chatId, runId, executionId, + // Pass the raw user prompt through so the Go-side trace + // ingest can stamp it onto the `request_traces.message` + // column at insert time. Avoids relying on the late + // `UpdateAnalytics` UPDATE (which silently misses many + // rows). + userMessage: message, usage: lifecycleResult?.usage, cost: lifecycleResult?.cost, }) diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index c42b0c705db..ae6ef9d887f 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -11,10 +11,13 @@ import { trace, } from '@opentelemetry/api' import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' +import { + CopilotBranchKind, + CopilotSurface, +} from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' -import { CopilotBranchKind, CopilotSurface } from '@/lib/copilot/generated/trace-attribute-values-v1' /** * OTel GenAI experimental semantic conventions env var. When set to a @@ -374,7 +377,17 @@ function createFallbackSpanContext(): SpanContext { } export interface CopilotOtelScope { - requestId: string + /** + * Optional override for the logical request ID surfaced on + * `request.id` / `sim.request_id` span attributes. Leave unset on + * the primary chat POST path — `startCopilotOtelRoot` will derive + * it from the newly-created root span's OTel trace ID, which is the + * same 32-hex value that flows through `traceparent` and shows up + * in Grafana. Pass an explicit value only for paths that need a + * non-trace-derived identifier (e.g. headless / resume taking an + * ID from persisted state). 
+ */ + requestId?: string route?: string chatId?: string workflowId?: string @@ -409,13 +422,18 @@ const USER_MESSAGE_PREVIEW_MAX_CHARS = 500 * span to outlive the synchronous handler body — e.g. SSE routes). */ function buildAgentSpanAttributes( - scope: CopilotOtelScope + scope: CopilotOtelScope & { requestId: string } ): Record { const preview = truncateUserMessagePreview(scope.userMessagePreview) return { 'gen_ai.agent.name': 'mothership', 'gen_ai.agent.id': scope.transport === 'stream' ? 'mothership-stream' : 'mothership-headless', 'gen_ai.operation.name': scope.transport === 'stream' ? 'chat' : 'invoke_agent', + // `request.id` and `sim.request_id` intentionally carry the SAME + // value. For chat POSTs (where scope.requestId is not provided + // by the caller) this is the OTel trace ID of this root span — + // meaning the value pasted from the UI's "copy request ID" + // button works directly in Grafana's trace-ID search box. 'request.id': scope.requestId, 'sim.request_id': scope.requestId, 'copilot.route': scope.route ?? '', @@ -534,7 +552,9 @@ export interface CopilotOtelRoot { setRequestShape: (shape: CopilotOtelRequestShape) => void } -export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { +export function startCopilotOtelRoot( + scope: CopilotOtelScope +): CopilotOtelRoot & { requestId: string } { // Create gen_ai.agent.execute as a TRUE root span — do not inherit // from Next.js's HTTP handler span. The framework span is dropped by // our sampler (it has `next.span_type`), so if we parented under it, @@ -543,14 +563,33 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { // disrupted would inherit the same dropped parent. Starting from // ROOT_CONTEXT gives the mothership lifecycle its own clean trace tree. const parentContext = ROOT_CONTEXT + // Start the span FIRST with a placeholder requestId, so we can read + // its actual trace ID and stamp it as the canonical `request.id`. 
+ // This makes the ID the UI exposes (via `msg.requestId`) identical + // to the trace ID Grafana uses — one ID, pasteable anywhere. When + // the caller provided an explicit override (resume / headless / + // tests) we keep that instead. const span = getTracer().startSpan( TraceSpan.GenAiAgentExecute, - { attributes: buildAgentSpanAttributes(scope) }, + { attributes: buildAgentSpanAttributes({ ...scope, requestId: '' }) }, parentContext ) const carrierSpan = isValidSpanContext(span.spanContext()) ? span : trace.wrapSpanContext(createFallbackSpanContext()) + const spanContext = carrierSpan.spanContext() + // Derived ID: use the caller's override when given, otherwise the + // real OTel trace ID. Fall back to an empty string only when OTel + // itself failed to produce a valid span (shouldn't happen in prod + // but the carrier branch above already handles that defensively). + const requestId = + scope.requestId ?? + (spanContext.traceId && spanContext.traceId.length === 32 ? spanContext.traceId : '') + // Re-stamp with the resolved ID (overwriting the placeholder empties + // set above). Cheap — both `request.id` and `sim.request_id` get the + // same value. + span.setAttribute('request.id', requestId) + span.setAttribute('sim.request_id', requestId) const rootContext = trace.setSpan(parentContext, carrierSpan) let finished = false @@ -584,6 +623,10 @@ export function startCopilotOtelRoot(scope: CopilotOtelScope): CopilotOtelRoot { return { span, context: rootContext, + // Surface the resolved requestId so callers can thread it through + // trackers, log prefixes, and persisted `msg.requestId` without + // having to dig it back out of span attributes. 
+ requestId, finish, setInputMessages: (input) => setAgentInputMessages(span, input), setOutputMessages: (output) => setAgentOutputMessages(span, output), @@ -605,7 +648,9 @@ function applyRequestShape(span: Span, shape: CopilotOtelRequestShape): void { span.setAttribute(TraceAttr.CopilotBranchKind, shape.branchKind) span.setAttribute( TraceAttr.CopilotSurface, - shape.branchKind === CopilotBranchKind.Workflow ? CopilotSurface.Copilot : CopilotSurface.Mothership + shape.branchKind === CopilotBranchKind.Workflow + ? CopilotSurface.Copilot + : CopilotSurface.Mothership ) } if (shape.mode) span.setAttribute(TraceAttr.CopilotMode, shape.mode) @@ -646,14 +691,25 @@ export async function withCopilotOtelContext( fn: (otelContext: Context) => Promise ): Promise { const parentContext = context.active() + // Same trace-id-derives-requestId dance as startCopilotOtelRoot — see + // that function for the rationale. Stamp a placeholder, read the real + // trace ID off the span, then overwrite. const span = getTracer().startSpan( TraceSpan.GenAiAgentExecute, - { attributes: buildAgentSpanAttributes(scope) }, + { attributes: buildAgentSpanAttributes({ ...scope, requestId: scope.requestId ?? '' }) }, parentContext ) const carrierSpan = isValidSpanContext(span.spanContext()) ? span : trace.wrapSpanContext(createFallbackSpanContext()) + const spanContext = carrierSpan.spanContext() + const resolvedRequestId = + scope.requestId ?? + (spanContext.traceId && spanContext.traceId.length === 32 ? 
spanContext.traceId : '') + if (resolvedRequestId) { + span.setAttribute('request.id', resolvedRequestId) + span.setAttribute('sim.request_id', resolvedRequestId) + } const otelContext = trace.setSpan(parentContext, carrierSpan) let terminalStatusSet = false diff --git a/apps/sim/lib/copilot/request/session/abort.ts b/apps/sim/lib/copilot/request/session/abort.ts index 2bbc975a010..ea0dcdb913d 100644 --- a/apps/sim/lib/copilot/request/session/abort.ts +++ b/apps/sim/lib/copilot/request/session/abort.ts @@ -1,10 +1,10 @@ import { createLogger } from '@sim/logger' +import { AbortBackend } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis' import { clearAbortMarker, hasAbortMarker, writeAbortMarker } from './buffer' -import { AbortBackend } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('SessionAbort') @@ -137,10 +137,7 @@ export async function acquirePendingChatStream( }, async (span) => { const redis = getRedisClient() - span.setAttribute( - TraceAttr.LockBackend, - redis ? AbortBackend.Redis : AbortBackend.InProcess - ) + span.setAttribute(TraceAttr.LockBackend, redis ? AbortBackend.Redis : AbortBackend.InProcess) if (redis) { const deadline = Date.now() + timeoutMs for (;;) { @@ -275,9 +272,7 @@ export type AbortReasonValue = (typeof AbortReason)[keyof typeof AbortReason] * `AbortReason` is presumed non-explicit. 
*/ export function isExplicitStopReason(reason: unknown): boolean { - return ( - reason === AbortReason.UserStop || reason === AbortReason.RedisPoller - ) + return reason === AbortReason.UserStop || reason === AbortReason.RedisPoller } const pollingStreams = new Set() diff --git a/apps/sim/lib/copilot/request/session/index.ts b/apps/sim/lib/copilot/request/session/index.ts index b9a4caf44aa..a09a194c788 100644 --- a/apps/sim/lib/copilot/request/session/index.ts +++ b/apps/sim/lib/copilot/request/session/index.ts @@ -1,7 +1,7 @@ export { - abortActiveStream, AbortReason, type AbortReasonValue, + abortActiveStream, acquirePendingChatStream, cleanupAbortMarker, getPendingChatStreamId, diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index 7c0e4df6085..c11ba83ea76 100644 --- a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -3,12 +3,12 @@ import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, } from '@/lib/copilot/generated/mothership-stream-v1' +import { CopilotRecoveryOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import { getLatestSeq, getOldestSeq, readEvents } from './buffer' import { createEvent } from './event' -import { CopilotRecoveryOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('SessionRecovery') diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index 7f36dcba97c..1672bf6570c 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -137,7 +137,9 @@ async function orchestrateSubagentStreamInner( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), 'X-Client-Version': SIM_AGENT_VERSION, - ...(options.simRequestId ? { 'X-Sim-Request-ID': options.simRequestId } : {}), + // X-Sim-Request-ID removed — Go derives the logical request + // ID from the propagated W3C `traceparent` now. See + // lifecycle/run.ts for the full rationale. }, body: JSON.stringify({ ...requestPayload, diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 061a69c862d..6b4958e9151 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -1,11 +1,11 @@ import { createLogger } from '@sim/logger' import { FunctionExecute, UserTable } from '@/lib/copilot/generated/tool-catalog-v1' +import { CopilotOutputFileOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' -import { CopilotOutputFileOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' const logger = createLogger('CopilotToolResultFiles') diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index 5c3039af608..1fed85102af 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -4,12 +4,13 @@ import { createLogger } from '@sim/logger' import { parse as csvParse } from 'csv-parse/sync' import { eq } from 'drizzle-orm' import { FunctionExecute, Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1' +import { CopilotTableOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from 
'@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' import { getTableById } from '@/lib/table/service' -import { CopilotTableOutcome, CopilotTableSourceFormat } from '@/lib/copilot/generated/trace-attribute-values-v1' + const logger = createLogger('CopilotToolResultTables') const MAX_OUTPUT_TABLE_ROWS = 10_000 diff --git a/apps/sim/lib/copilot/request/trace.ts b/apps/sim/lib/copilot/request/trace.ts index f73fca3f75d..05d250c77de 100644 --- a/apps/sim/lib/copilot/request/trace.ts +++ b/apps/sim/lib/copilot/request/trace.ts @@ -73,6 +73,13 @@ export class TraceCollector { chatId?: string runId?: string executionId?: string + // Original user prompt, surfaced on the `request_traces.message` + // column at row-insert time so it's queryable from the DB without + // going through Tempo. Sim already has this at chat-POST time; it's + // threaded through here to the trace report so the row is complete + // the moment it's first written instead of waiting on the late + // analytics UPDATE. + userMessage?: string usage?: { prompt: number; completion: number } cost?: { input: number; output: number; total: number } }): RequestTraceV1SimReport { @@ -98,6 +105,7 @@ export class TraceCollector { chatId: params.chatId, runId: params.runId, executionId: params.executionId, + ...(params.userMessage ? 
{ userMessage: params.userMessage } : {}), startMs: this.startMs, endMs, durationMs: endMs - this.startMs, diff --git a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts index 7c741072826..a7c2e57bdf2 100644 --- a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts +++ b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts @@ -1,13 +1,13 @@ import { createLogger } from '@sim/logger' import type { AsyncCompletionData } from '@/lib/copilot/async-runs/lifecycle' import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants' -import { traceparentHeader } from '@/lib/copilot/tools/client/trace-context' import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1' import { RunBlock, RunFromBlock, RunWorkflowUntilBlock, } from '@/lib/copilot/generated/tool-catalog-v1' +import { traceparentHeader } from '@/lib/copilot/tools/client/trace-context' import { generateId } from '@/lib/core/utils/uuid' import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils' import { useExecutionStore } from '@/stores/execution/store' diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index 9f710151dab..e8a16b01db7 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -1,11 +1,16 @@ import { type Span, SpanStatusCode, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' +import { + CopilotVfsOutcome, + CopilotVfsReadOutcome, + CopilotVfsReadPath, +} from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { downloadWorkspaceFile } from 
'@/lib/uploads/contexts/workspace/workspace-file-manager' import { isImageFileType } from '@/lib/uploads/utils/file-utils' -import { CopilotVfsOutcome, CopilotVfsReadOutcome, CopilotVfsReadPath } from '@/lib/copilot/generated/trace-attribute-values-v1' + /** * Lazy tracer (see lib/copilot/request/otel.ts for the same pattern and * why we resolve on every call). From 4ab67069adac8daa8968183e70855166b11221be Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 18 Apr 2026 18:45:45 -0700 Subject: [PATCH 10/10] Otel --- apps/sim/app/api/billing/update-cost/route.ts | 5 +- .../api/copilot/api-keys/validate/route.ts | 4 +- apps/sim/app/api/copilot/chat/abort/route.ts | 57 +-- apps/sim/app/api/copilot/chat/stop/route.ts | 35 +- apps/sim/app/api/copilot/chat/stream/route.ts | 25 +- apps/sim/app/api/copilot/confirm/route.ts | 22 +- .../message-actions/message-actions.tsx | 15 - .../mothership-chat/mothership-chat.tsx | 30 +- .../[workspaceId]/home/hooks/use-chat.ts | 114 ++---- apps/sim/instrumentation-node.ts | 195 ++-------- apps/sim/lib/copilot/async-runs/repository.ts | 137 +++---- apps/sim/lib/copilot/chat/post.ts | 131 +++---- apps/sim/lib/copilot/chat/terminal-state.ts | 10 +- apps/sim/lib/copilot/constants.ts | 3 - .../generated/trace-attribute-values-v1.ts | 1 + .../copilot/generated/trace-attributes-v1.ts | 18 +- apps/sim/lib/copilot/request/go/fetch.ts | 30 +- apps/sim/lib/copilot/request/go/stream.ts | 7 - apps/sim/lib/copilot/request/handlers/tool.ts | 8 +- apps/sim/lib/copilot/request/http.ts | 2 +- .../lib/copilot/request/lifecycle/finalize.ts | 35 +- .../lib/copilot/request/lifecycle/headless.ts | 3 +- apps/sim/lib/copilot/request/lifecycle/run.ts | 7 - .../lib/copilot/request/lifecycle/start.ts | 93 ++--- apps/sim/lib/copilot/request/otel.ts | 348 ++++-------------- apps/sim/lib/copilot/request/session/abort.ts | 6 +- .../lib/copilot/request/session/recovery.ts | 6 +- apps/sim/lib/copilot/request/subagent.ts | 13 +- 
apps/sim/lib/copilot/request/tools/files.ts | 9 +- .../lib/copilot/request/tools/resources.ts | 4 +- apps/sim/lib/copilot/request/tools/tables.ts | 21 +- apps/sim/lib/copilot/request/trace.ts | 2 +- .../tools/client/run-tool-execution.ts | 3 - .../lib/copilot/tools/client/trace-context.ts | 53 +-- apps/sim/lib/copilot/tools/handlers/vfs.ts | 4 +- apps/sim/lib/copilot/vfs/file-reader.ts | 52 +-- 36 files changed, 436 insertions(+), 1072 deletions(-) diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 9269324bd57..eef3ac31be0 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -41,8 +41,8 @@ export async function POST(req: NextRequest) { req.headers, TraceSpan.CopilotBillingUpdateCost, { - 'http.method': 'POST', - 'http.route': '/api/billing/update-cost', + [TraceAttr.HttpMethod]: 'POST', + [TraceAttr.HttpRoute]: '/api/billing/update-cost', }, async (span) => updateCostInner(req, span) ) @@ -95,7 +95,6 @@ async function updateCostInner( if (!validation.success) { logger.warn(`[${requestId}] Invalid request body`, { errors: validation.error.issues, - body, }) span.setAttribute(TraceAttr.BillingOutcome, BillingRouteOutcome.InvalidBody) span.setAttribute(TraceAttr.HttpStatusCode, 400) diff --git a/apps/sim/app/api/copilot/api-keys/validate/route.ts b/apps/sim/app/api/copilot/api-keys/validate/route.ts index f52e2f34ee4..fffc2b9e534 100644 --- a/apps/sim/app/api/copilot/api-keys/validate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/validate/route.ts @@ -26,8 +26,8 @@ export async function POST(req: NextRequest) { req.headers, TraceSpan.CopilotAuthValidateApiKey, { - 'http.method': 'POST', - 'http.route': '/api/copilot/api-keys/validate', + [TraceAttr.HttpMethod]: 'POST', + [TraceAttr.HttpRoute]: '/api/copilot/api-keys/validate', }, async (span) => { try { diff --git a/apps/sim/app/api/copilot/chat/abort/route.ts 
b/apps/sim/app/api/copilot/chat/abort/route.ts index b3bd74befd5..971c00193da 100644 --- a/apps/sim/app/api/copilot/chat/abort/route.ts +++ b/apps/sim/app/api/copilot/chat/abort/route.ts @@ -15,16 +15,8 @@ const logger = createLogger('CopilotChatAbortAPI') const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000 const STREAM_ABORT_SETTLE_TIMEOUT_MS = 8000 -/** - * POST /api/copilot/chat/abort - * - * Hang-critical: the client calls this when the user hits "stop". It - * fans out to Go (explicit-abort marker) and then waits up to - * STREAM_ABORT_SETTLE_TIMEOUT_MS (8s) for the prior chat stream to - * unwind. If EITHER the Go fetch or the settle-wait hangs, the user - * sees a "still shutting down" 409 — or worse, an unresolved Promise - * on the client. The spans below pinpoint which phase stalled. - */ +// POST /api/copilot/chat/abort — fires on user Stop; marks the Go +// side aborted then waits for the prior stream to settle. export async function POST(request: Request) { return withIncomingGoSpan( request.headers, @@ -71,36 +63,11 @@ export async function POST(request: Request) { } if (chatId) rootSpan.setAttribute(TraceAttr.ChatId, chatId) - // ORDER MATTERS: local abort FIRST, Go explicit-abort SECOND. - // - // Sim and Go each own a separate Redis instance and do not share - // state through it — the only signal that crosses the service - // boundary is this HTTP call. So the race to win is purely - // Sim-internal: - // - // - `abortActiveStream` flips the AbortController (reason = - // AbortReason.UserStop) that's wrapped around the in-flight - // `fetchGo('/api/mothership', ...)` SSE stream. Once flipped, - // the stream throws AbortError on the next chunk read, and - // the lifecycle catch block's classifier sees - // `signal.aborted = true` with an explicit-stop reason → the - // root span gets stamped `cancel_reason = explicit_stop` and - // the `request.cancelled` event fires correctly. 
- // - // - If we call Go first (old order), Go's context cancels from - // its own explicit-abort handler, the /api/mothership stream - // errors with "context canceled", and Sim's catch block fires - // BEFORE we've flipped the local AbortController. At that - // point `signal.aborted` is still false, so the classifier - // falls through to `client_disconnect` / `unknown` and the - // root ends up as `outcome = error` — which is what we saw - // in trace 25f31730082078cef54653b1740caf12. - // - // Go's explicit-abort endpoint still runs second: it's what tells - // Go-side billing "this was intentional, flush the paused ledger" - // and is unaffected by the reorder (Go's context is already - // cancelled by the time we get there; the endpoint's job is - // billing semantics, not cancelling in-flight work). + // Local abort before Go — lets the lifecycle classifier see + // `signal.aborted` with an explicit-stop reason before Go's + // context-canceled error propagates back. Go's endpoint runs + // second for billing-ledger flush; Go's context is already + // cancelled by then. const aborted = await abortActiveStream(streamId) rootSpan.setAttribute(TraceAttr.CopilotAbortLocalAborted, aborted) @@ -144,16 +111,12 @@ export async function POST(request: Request) { rootSpan.setAttribute(TraceAttr.CopilotAbortGoMarkerOk, goAbortOk) if (chatId) { - // `waitForPendingChatStream` blocks up to 8s waiting for the - // prior stream's release. It's THE single most likely stall - // point in this handler — isolate it so a slow unwind shows up - // as this child span rather than unexplained root latency. 
const settled = await withCopilotSpan( TraceSpan.CopilotChatAbortWaitSettle, { - 'chat.id': chatId, - 'stream.id': streamId, - 'settle.timeout_ms': STREAM_ABORT_SETTLE_TIMEOUT_MS, + [TraceAttr.ChatId]: chatId, + [TraceAttr.StreamId]: streamId, + [TraceAttr.SettleTimeoutMs]: STREAM_ABORT_SETTLE_TIMEOUT_MS, }, async (settleSpan) => { const start = Date.now() diff --git a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index ea8a9216f03..0505582e8de 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -58,27 +58,14 @@ const StopSchema = z.object({ streamId: z.string(), content: z.string(), contentBlocks: z.array(ContentBlockSchema).optional(), - /** - * Optional because older clients may not send it, but strongly - * recommended: without it, the stopped assistant message persisted - * below loses its `requestId`, which breaks the "Copy request ID" - * button in the UI (it's the only handle the user has for filing - * bug reports about a hung / bad turn). - */ + // Optional for older clients; when present, flows into msg.requestId + // so the UI's copy-request-ID button survives a stopped turn. requestId: z.string().optional(), }) -/** - * POST /api/copilot/chat/stop - * Persists partial assistant content when the user stops a stream mid-response. - * Clears conversationId so the server-side onComplete won't duplicate the message. - * The chat stream lock is intentionally left alone here; it is released only once - * the aborted server stream actually unwinds. - * - * Hang-critical: runs a DB SELECT + UPDATE + pubsub publish. A slow DB - * here makes the UI look frozen after the user clicks Stop. The root - * span lets us tell whether stalls are DB-bound or pubsub-bound. - */ +// POST /api/copilot/chat/stop — persists partial assistant content +// when the user stops mid-stream. Lock release is handled by the +// aborted server stream unwinding, not this handler. 
export async function POST(req: NextRequest) { return withIncomingGoSpan( req.headers, @@ -152,12 +139,7 @@ export async function POST(req: NextRequest) { content, timestamp: new Date().toISOString(), contentBlocks: synthesizedStoppedBlocks, - // Preserve the requestId onto the persisted aborted message - // so the UI's "Copy request ID" button keeps working after - // the chat history refetches and replaces the in-memory - // streaming message with this persisted version. Without - // this, the button blinks out ~1-2s after the user hits - // Stop because the refetched message has no requestId. + // Persist so the UI copy-request-id button survives refetch. ...(requestId ? { requestId } : {}), }) const assistantMessage: PersistedMessage = normalized @@ -187,7 +169,10 @@ export async function POST(req: NextRequest) { } catch (error) { if (error instanceof z.ZodError) { span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.ValidationError) - return NextResponse.json({ error: 'Invalid request' }, { status: 400 }) + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) } logger.error('Error stopping chat stream:', error) span.setAttribute(TraceAttr.CopilotStopOutcome, CopilotStopOutcome.InternalError) diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index df7a0c86dcc..bb4544c2f24 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -1,4 +1,4 @@ -import { context as otelContext, SpanStatusCode, trace } from '@opentelemetry/api' +import { context as otelContext, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository' @@ -11,9 +11,10 @@ import { CopilotTransport, } from '@/lib/copilot/generated/trace-attribute-values-v1' import { 
TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http' -import { getCopilotTracer } from '@/lib/copilot/request/otel' +import { getCopilotTracer, markSpanForError } from '@/lib/copilot/request/otel' import { checkForReplayGap, createEvent, @@ -141,7 +142,7 @@ export async function GET(request: NextRequest) { // linking behavior; no regression. const incomingContext = contextFromRequestHeaders(request.headers) const rootSpan = getCopilotTracer().startSpan( - 'copilot.resume.request', + TraceSpan.CopilotResumeRequest, { attributes: { [TraceAttr.CopilotTransport]: batchMode ? CopilotTransport.Batch : CopilotTransport.Stream, @@ -267,6 +268,16 @@ async function handleResumeRequestBody({ let controllerClosed = false let sawTerminalEvent = false let currentRequestId = extractRunRequestId(run) + // Stamp the logical request id + chat id on the resume root as soon + // as we resolve them from the run row, so TraceQL joins work on + // resume legs the same way they do on the original POST. 
+ if (currentRequestId) { + rootSpan.setAttribute(TraceAttr.RequestId, currentRequestId) + rootSpan.setAttribute(TraceAttr.SimRequestId, currentRequestId) + } + if (run?.chatId) { + rootSpan.setAttribute(TraceAttr.ChatId, run.chatId) + } const closeController = () => { if (controllerClosed) return @@ -298,7 +309,7 @@ async function handleResumeRequestBody({ const events = await readEvents(streamId, cursor) if (events.length > 0) { totalEventsFlushed += events.length - logger.info('[Resume] Flushing events', { + logger.debug('[Resume] Flushing events', { streamId, afterCursor: cursor, eventCount: events.length, @@ -420,11 +431,7 @@ async function handleResumeRequestBody({ reason: 'stream_replay_failed', }) } - rootSpan.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - rootSpan.recordException(error instanceof Error ? error : new Error(String(error))) + markSpanForError(rootSpan, error) } finally { request.signal.removeEventListener('abort', abortListener) closeController() diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index 7693496e1ef..aa3f3e90010 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -116,25 +116,16 @@ async function updateToolCallStatus( } } -/** - * POST /api/copilot/confirm - * Accept client tool completion or detach confirmations. - * - * Hang-critical: this is the delivery path for client-executed tool - * results. If this handler stalls (DB lock, Redis timeout, pubsub - * failure), the `copilot.tool.wait_for_client_result` span on the - * originating chat stream never resolves and the whole request looks - * hung. The root span here gives us per-request visibility so we can - * correlate a slow confirm with the chat-stream that was waiting on it - * via `toolCallId`. - */ +// POST /api/copilot/confirm — delivery path for client-executed tool +// results. 
Correlate via `toolCallId` when the awaiting chat stream +// stalls. export async function POST(req: NextRequest) { const tracker = createRequestTracker() return withIncomingGoSpan( req.headers, TraceSpan.CopilotConfirmToolResult, - { 'request.id': tracker.requestId }, + { [TraceAttr.RequestId]: tracker.requestId }, async (span) => { try { const { userId: authenticatedUserId, isAuthenticated } = @@ -195,9 +186,8 @@ export async function POST(req: NextRequest) { message, }) span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.UpdateFailed) - return createBadRequestResponse( - 'Failed to update tool call status or tool call not found' - ) + // DB write failed — 500, not 400. 400 is a client-shape error. + return createInternalServerErrorResponse('Failed to update tool call status') } span.setAttribute(TraceAttr.CopilotConfirmOutcome, CopilotConfirmOutcome.Delivered) diff --git a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx index 84f1ce4cada..a86350a8881 100644 --- a/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx +++ b/apps/sim/app/workspace/[workspaceId]/components/message-actions/message-actions.tsx @@ -140,12 +140,6 @@ export function MessageActions({ content, chatId, userQuery, requestId }: Messag const hasContent = Boolean(content) const canSubmitFeedback = Boolean(chatId && userQuery) - - // Render the action row whenever there's something the user can - // actually act on: copy the message, or open the feedback modal - // (thumbs up / down). Request ID alone is not a reason to render the - // row anymore — it's only exposed from inside the thumbs-down modal, - // which requires both chatId and userQuery. 
if (!hasContent && !canSubmitFeedback) return null return ( @@ -198,15 +192,6 @@ export function MessageActions({ content, chatId, userQuery, requestId }: Messag )} - {/* - Intentionally NO root-row "Copy request ID" button here — it - rendered as an ambiguous standalone Copy icon next to the - message Copy icon, which was confusing (two indistinguishable - copy buttons side by side). The request ID only needs to be - grabbable from the thumbs-down feedback modal below, which is - the surface we actually want people to use when reporting a - bad response. - */} diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx index 6868644f1b7..fb94ac5a759 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-chat/mothership-chat.tsx @@ -179,26 +179,16 @@ export function MothershipChat({ onOptionSelect={isLastMessage ? onSubmit : undefined} onWorkspaceResourceSelect={onWorkspaceResourceSelect} /> - {/* - Render the actions row whenever the assistant turn has - settled (not streaming) AND there's anything to act on. - We intentionally include `requestId` in the trigger so - that aborted or content-less turns still surface the - copy-trace-ID button — dropping the row in those cases - makes it impossible for a user to grab the request ID - needed for bug reports. - */} - {!isThisStreaming && - (msg.content || msg.contentBlocks?.length || msg.requestId) && ( -
- -
- )} + {!isThisStreaming && (msg.content || msg.contentBlocks?.length) && ( +
+ +
+ )} ) })} diff --git a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts index 645bf447c13..93ed060be3d 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts +++ b/apps/sim/app/workspace/[workspaceId]/home/hooks/use-chat.ts @@ -1272,21 +1272,13 @@ export function useChat( const activeTurnRef = useRef(null) const pendingUserMsgRef = useRef(null) const streamIdRef = useRef(undefined) - // W3C traceparent for the currently-streaming chat request. Sim's - // chat POST response returns this header built from the root - // gen_ai.agent.execute span; we echo it on every side-channel - // request (abort/stop/confirm/stream-replay) so they appear as - // child spans of the same trace instead of disconnected roots. - // Cleared when a new chat starts (overwritten by the next POST). + // W3C traceparent from the chat POST response; echoed on + // abort/stop/confirm/replay so side-channel calls join the same + // trace instead of becoming disconnected roots. const streamTraceparentRef = useRef(undefined) - /** - * The `request.id` stamped on the active stream's trace events. Used - * to forward to the server-side Stop route so the persisted aborted - * assistant message keeps its requestId — which the UI needs for the - * "Copy request ID" button (the user's handle for bug reports on bad - * turns). Updated from the `trace` payload on every parsed stream - * event; reset by the caller when a new chat starts. - */ + // The `request.id` from the active stream's trace events. Forwarded + // to /chat/stop so the persisted aborted message carries it (keeps + // the copy-request-ID button functional after refetch). 
const streamRequestIdRef = useRef(undefined) const locallyTerminalStreamIdRef = useRef(undefined) const lastCursorRef = useRef('0') @@ -1327,11 +1319,6 @@ export function useChat( pendingUserMsgRef.current = null streamIdRef.current = undefined streamRequestIdRef.current = undefined - // Drop the previous stream's W3C traceparent so side-channel - // requests fired AFTER a turn ended don't propagate stale trace - // context. Without this the abort/confirm/stop handlers could - // still parent under the finished turn's span; each new chat POST - // overwrites this ref to the fresh turn's traceparent. streamTraceparentRef.current = undefined setCurrentChatTraceparent(undefined) lastCursorRef.current = '0' @@ -1835,11 +1822,7 @@ export function useChat( while (true) { if (pendingLines.length === 0) { - // Once the terminal `complete` event has been processed, - // don't read another chunk — we've drained everything - // that was locally buffered alongside the terminator. - // Any further events would be a server-side bug (emitting - // after `complete`); don't hang waiting for them. + // Don't read another chunk after `complete` has drained. if (sawCompleteEvent) break const { done, value } = await reader.read() if (done) break @@ -1880,9 +1863,6 @@ export function useChat( if (parsed.trace?.requestId && parsed.trace.requestId !== streamRequestId) { streamRequestId = parsed.trace.requestId - // Mirror into a ref so stopGeneration / persistPartialResponse - // can read the latest requestId without being re-created on - // every render (they close over the ref, not a stale value). streamRequestIdRef.current = streamRequestId flush() } @@ -2500,25 +2480,11 @@ export function useChat( } case MothershipStreamV1EventType.complete: { sawCompleteEvent = true - // `complete` is the logical end-of-turn marker, NOT a - // guillotine on the read loop. 
The server commonly - // flushes a few more events in the SAME TCP chunk as - // the terminal `complete` — trailing text fragments, - // followup-action blocks, or run metadata — so we must - // drain everything that's already sitting in the - // `pendingLines` buffer before stopping. Bailing on - // first sight used to truncate the last sentence of - // the assistant response and drop followups on the - // floor, even though the server had persisted them - // correctly (trace 677af168, request - // 06ff631a-4d72-4862-ac5c-9dbdd0c380c2). - // - // We still avoid another `reader.read()` — events that - // arrive in a SUBSEQUENT chunk after `complete` would - // be a server bug; don't wait for them. Draining only - // what's already locally buffered strikes the balance: - // no lost events from the terminal chunk, no hang on - // a misbehaving transport. + // `complete` is the end-of-turn marker; drain whatever + // else arrived in the same TCP chunk (trailing text, + // followups, run metadata) before stopping. Do NOT + // await another read — events after `complete` would + // be a server bug. continue } } @@ -2582,11 +2548,6 @@ export function useChat( `/api/mothership/chat/stream?streamId=${encodeURIComponent(streamId)}&after=${encodeURIComponent(afterCursor)}&batch=true`, { signal, - // Propagate the original chat trace so batch-replay spans - // nest under the same trace as the chat POST. Empty on - // page-reload reconnects (stored ref was wiped), in which - // case the resume handler starts its own root — unchanged - // from pre-linking behavior. ...(streamTraceparentRef.current ? { headers: { traceparent: streamTraceparentRef.current } } : {}), @@ -2907,13 +2868,10 @@ export function useChat( streamId?: string content?: string blocks?: ContentBlock[] - // Caller-supplied snapshot. 
`stopGeneration` calls - // `clearActiveTurn()` BEFORE firing this, which nulls - // `streamRequestIdRef`; anyone relying on the ref at POST time - // would send `requestId: undefined` and the persisted message - // would lose its trace id handle (Copy-request-id button - // disappears on refetch — repro: trace de69695b). + // `stopGeneration` must snapshot these BEFORE clearActiveTurn() + // nulls the refs, or the fetch sees undefined. requestId?: string + traceparent?: string }) => { const chatId = overrides?.chatId ?? chatIdRef.current const streamId = overrides?.streamId ?? streamIdRef.current @@ -2921,6 +2879,7 @@ export function useChat( const content = overrides?.content ?? streamingContentRef.current const requestId = overrides?.requestId ?? streamRequestIdRef.current + const traceparent = overrides?.traceparent ?? streamTraceparentRef.current const sourceBlocks = overrides?.blocks ?? streamingBlocksRef.current const storedBlocks = sourceBlocks.map((block) => { @@ -2955,20 +2914,13 @@ export function useChat( method: 'POST', headers: { 'Content-Type': 'application/json', - ...(streamTraceparentRef.current ? { traceparent: streamTraceparentRef.current } : {}), + ...(traceparent ? { traceparent } : {}), }, body: JSON.stringify({ chatId, streamId, content, ...(storedBlocks.length > 0 && { contentBlocks: storedBlocks }), - // Forward the active stream's requestId so the server can - // stamp it onto the persisted aborted assistant message — - // keeps the "Copy request ID" button working after the - // in-memory streaming message gets replaced by the persisted - // one on chat history refetch. Pulled from the resolved - // `requestId` (override preferred over ref) because the ref - // may have been cleared by the time this fetch runs. ...(requestId ? 
{ requestId } : {}), }), }) @@ -3265,16 +3217,8 @@ export function useChat( signal: abortController.signal, }) - // Capture the server's root trace identity so we can propagate - // it on every subsequent side-channel call for this stream. - // See `streamTraceparentRef` comment above for full rationale. - // Fine to read even on non-ok responses — Sim still sets the - // header before validation fails so error traces are linked - // too; we just won't use it in that case because we return - // early below. Also mirror it into the module-level client- - // tool trace-context holder so tool-completion callbacks - // fired from non-React code paths (e.g. workflow runner) can - // echo it without having to thread a prop through. + // Capture for propagation on side-channel calls + non-React + // tool-completion callbacks (via trace-context singleton). const traceparent = response.headers.get('traceparent') if (traceparent) { streamTraceparentRef.current = traceparent @@ -3522,10 +3466,12 @@ export function useChat( ...(block.options ? { options: [...block.options] } : {}), ...(block.toolCall ? { toolCall: { ...block.toolCall } } : {}), })) - // Snapshot BEFORE clearActiveTurn() nulls the ref. The - // persistPartialResponse fetch runs inside stopBarrier below, - // after several awaits — the ref is long gone by then. + // Snapshot BEFORE clearActiveTurn() nulls the refs. Both + // persistPartialResponse and the abort/stop fetches run inside + // stopBarrier below, after several awaits — the refs are long + // gone by the time the fetches serialize their headers. const stopRequestIdSnapshot = streamRequestIdRef.current + const stopTraceparentSnapshot = streamTraceparentRef.current locallyTerminalStreamIdRef.current = sid streamGenRef.current++ @@ -3587,8 +3533,8 @@ export function useChat( method: 'POST', headers: { 'Content-Type': 'application/json', - ...(streamTraceparentRef.current - ? { traceparent: streamTraceparentRef.current } + ...(stopTraceparentSnapshot + ? 
{ traceparent: stopTraceparentSnapshot } : {}), }, body: JSON.stringify({ @@ -3614,6 +3560,7 @@ export function useChat( content: stopContentSnapshot, blocks: stopBlocksSnapshot, requestId: stopRequestIdSnapshot, + traceparent: stopTraceparentSnapshot, }) } @@ -3627,12 +3574,7 @@ export function useChat( pendingStopPromiseRef.current = stopBarrier try { await stopBarrier - // Notify downstream (onStreamEnd) and dispatch any queued - // follow-up message. Without this, a user who queued a message - // during streaming and then hit Stop would see the queued - // message stay queued until they manually re-sent — because - // `stopGeneration` previously short-circuited the whole turn- - // end pipeline. + // Dispatch queued follow-ups after Stop resolves. notifyTurnEnded({ error: false }) } catch (err) { setError(err instanceof Error ? err.message : 'Failed to stop the previous response') diff --git a/apps/sim/instrumentation-node.ts b/apps/sim/instrumentation-node.ts index b78f1ddfda1..54804e1955c 100644 --- a/apps/sim/instrumentation-node.ts +++ b/apps/sim/instrumentation-node.ts @@ -1,28 +1,6 @@ -/** - * Sim OpenTelemetry - Server-side Instrumentation - * - * Mothership joint trace design - * ----------------------------- - * Both Sim (this file) and the Go copilot server register under a single - * OTel `service.name = "mothership"` so every request shows up as one - * service in the OTLP backend. To keep the two halves distinguishable: - * - * - Every span emitted by the mothership lifecycle on this process is - * prefixed with `sim-mothership: ` on start, and gets a - * `mothership.origin = "sim-mothership"` attribute. - * - The Go side does the same with `go-mothership: ` / - * `mothership.origin = "go-mothership"`. - * - * The `-mothership` suffix on the origin is deliberate: this Sim process - * hosts plenty of non-mothership code (workflow executor, block runtime, - * indexer clients) that may emit its own traces in the future. 
Making - * the origin value explicit means a later "sim" origin can't collide - * with the mothership side. - * - * So in any OTLP backend, filter by `mothership.origin` (exact) or by - * operation name prefix (`sim-mothership:` / `go-mothership:`) to - * cleanly split the two halves. - */ +// Sim OTel bootstrap. Filter by `mothership.origin` or span-name +// prefix (`sim-mothership:` / `go-mothership:`) to separate the two +// halves of a mothership trace in the OTLP backend. import type { Attributes, Context, Link, SpanKind } from '@opentelemetry/api' import { DiagConsoleLogger, DiagLogLevel, diag, TraceFlags, trace } from '@opentelemetry/api' @@ -41,21 +19,13 @@ diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR) const logger = createLogger('OTelInstrumentation') -// Origin value lives on every mothership span as `mothership.origin`. -// Longer form intentionally used (vs. plain "sim") so non-mothership -// code running in this same Sim process can't collide if it later -// starts emitting its own traces. const MOTHERSHIP_ORIGIN = 'sim-mothership' as const const SPAN_NAME_PREFIX = `${MOTHERSHIP_ORIGIN}: ` -// Short slug used only for `service.instance.id`. Kept as plain "sim" -// so the instance id reads as `mothership-sim` — concise, already -// scoped by `service.name = "mothership"` as the container. const SERVICE_INSTANCE_SLUG = 'sim' as const const DEFAULT_TELEMETRY_CONFIG = { endpoint: env.TELEMETRY_ENDPOINT || 'https://telemetry.simstudio.ai/v1/traces', - // Joint Sim+Go service surface in Jaeger/Tempo. See header comment. serviceName: 'mothership', serviceVersion: '0.1.0', serverSide: { enabled: true }, @@ -67,33 +37,18 @@ const DEFAULT_TELEMETRY_CONFIG = { }, } -/** - * Span name prefixes we keep after sampling. - * - * Scope: this process only traces *mothership / copilot* requests for now. - * Anything outside that lifecycle (workflow executor, block runtime, - * Next.js framework noise, etc.) 
is intentionally dropped so Jaeger only - * shows the Sim half of a mothership trace. - * - * Any new prefix here should correspond to a span our copilot code - * explicitly creates; adding a broad prefix (e.g. `http.`) risks - * silently re-enabling non-copilot tracing. - */ +// Allowlist of span-name prefixes exported from this process. +// Non-mothership code (workflow executor, block runtime, framework +// noise) is dropped. Broaden carefully — `http.` etc. would reopen +// the firehose. const ALLOWED_SPAN_PREFIXES = ['gen_ai.', 'copilot.', 'sim →', 'sim.', 'tool.execute'] function isBusinessSpan(spanName: string): boolean { return ALLOWED_SPAN_PREFIXES.some((prefix) => spanName.startsWith(prefix)) } -/** - * Parse OTLP headers from the standard env var `OTEL_EXPORTER_OTLP_HEADERS`. - * - * Spec format: `key1=value1,key2=value2`, with values optionally - * URL-encoded. We tolerate whitespace around entries and values that - * themselves contain `=`. This is the mechanism every managed backend - * (Honeycomb, Grafana Cloud, New Relic, Datadog) uses to receive its - * auth token without any backend-specific code paths here. - */ +// Parse `OTEL_EXPORTER_OTLP_HEADERS`: `key1=value1,key2=value2` +// (URL-encoded values, whitespace tolerated). function parseOtlpHeadersEnv(raw: string): Record { const out: Record = {} if (!raw) return out @@ -115,30 +70,9 @@ function parseOtlpHeadersEnv(raw: string): Record { return out } -/** - * Normalize an OTLP base URL to the full traces-signal endpoint. - * - * The OTel HTTP exporter sends to whatever URL you give it verbatim - * — no signal-path appending. Meanwhile, the OTel spec says - * `OTEL_EXPORTER_OTLP_ENDPOINT` is a *base* URL and the SDK should - * append `/v1/traces`. We reconcile by always ensuring the final - * URL ends with `/v1/traces` unless the operator already put it - * there. - * - * Rules: - * - If the URL already ends with `/v1/traces`, respect it. 
- * - Otherwise, append `/v1/traces` (dropping any trailing slash - * on the base first). - * - Malformed URLs pass through unchanged; the exporter will - * surface the error at first export. - * - * Examples: - * https://api.honeycomb.io → https://api.honeycomb.io/v1/traces - * https://api.honeycomb.io/v1/traces → https://api.honeycomb.io/v1/traces - * https://otlp-gateway-prod-us-east-3.grafana.net/otlp - * → …/otlp/v1/traces - * http://localhost:4318 → http://localhost:4318/v1/traces - */ +// Append `/v1/traces` to the OTLP base URL unless already present. +// The HTTP exporter doesn't auto-suffix the signal path even though +// the spec says the env var is a base URL. function normalizeOtlpTracesUrl(url: string): string { if (!url) return url try { @@ -151,14 +85,9 @@ function normalizeOtlpTracesUrl(url: string): string { } } -/** - * Resolve the sampling ratio from env, with sensible fallbacks. - * - * Matches the Go side's `samplerFromEnv()` semantics so operators can - * control both halves of the mothership trace tree from the same - * variable. Invalid values degrade gracefully to the fallback. - */ -function resolveSamplingRatio(isLocalEndpoint: boolean): number { +// Sampling ratio from env (mirrors Go's `samplerFromEnv`); fallback +// is 100% everywhere. Retention caps cost, not sampling. +function resolveSamplingRatio(_isLocalEndpoint: boolean): number { const raw = process.env.TELEMETRY_SAMPLING_RATIO || process.env.OTEL_TRACES_SAMPLER_ARG || '' if (raw) { const parsed = Number.parseFloat(raw) @@ -168,29 +97,12 @@ function resolveSamplingRatio(isLocalEndpoint: boolean): number { return parsed } } - // Local dev gets 100% for deterministic manual verification. - // Production default is also 100% — the 1-day retention at the - // backend caps storage cost, not sampling. - return isLocalEndpoint ? 
1.0 : 1.0 + return 1.0 } -/** - * MothershipOriginSpanProcessor tags mothership-lifecycle spans with - * `mothership.origin` and prepends the origin prefix to the span name - * on start, before any downstream processor (BatchSpanProcessor) - * reads it. - * - * Gated on `isBusinessSpan(name)` so only spans that already match - * the mothership allowlist get the label. The sampler drops - * non-mothership roots anyway, but keeping the tagger conditional - * means that if the sampler is ever relaxed (or a different - * instrumentation stream is added alongside mothership), unrelated - * spans won't accidentally inherit the mothership origin. - * - * Implemented as its own processor rather than a resource attribute - * so the backend span/operation list (which keys on span name) is - * visually split between sim and go even when both share service.name. - */ +// Tags allowed spans with `mothership.origin` and prepends +// `sim-mothership:` to the span name so backends can visually split +// the two halves even when service.name is shared. class MothershipOriginSpanProcessor implements SpanProcessor { onStart(span: Span): void { const name = span.name @@ -225,11 +137,7 @@ async function initializeOpenTelemetry() { telemetryConfig = DEFAULT_TELEMETRY_CONFIG } - // Endpoint resolution: prefer the OTel spec env var, fall back to - // our legacy TELEMETRY_ENDPOINT so existing deploys keep working - // during rollout. Read process.env directly because - // @t3-oss/env-nextjs sometimes returns undefined for server vars - // that aren't listed in experimental__runtimeEnv. + // Prefer the OTel spec env var, fall back to legacy TELEMETRY_ENDPOINT. const resolvedEndpoint = process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.TELEMETRY_ENDPOINT || @@ -263,14 +171,9 @@ async function initializeOpenTelemetry() { '@opentelemetry/sdk-trace-base' ) - // Sampler responsibilities: - // 1. Drop Next.js framework spans (tagged with next.span_type). - // 2. 
If we're inside a sampled business trace (parent has SAMPLED), let - // the child record so the full trace stays together. - // 3. For a business-span ROOT, decide afresh with the ratio sampler — - // ignoring an unsampled Next.js HTTP parent. Delegating to - // ParentBasedSampler here would use its localParentNotSampled - // inner sampler (AlwaysOff by default) and veto every trace. + // Drops Next framework spans, inherits SAMPLED from business + // parents, and re-samples business roots fresh (don't delegate to + // ParentBased — its unsampled-parent path is AlwaysOff by default). const createBusinessSpanSampler = (rootRatioSampler: Sampler): Sampler => ({ shouldSample( context: Context, @@ -311,19 +214,7 @@ async function initializeOpenTelemetry() { }, }) - // Parse OTEL_EXPORTER_OTLP_HEADERS per the OTel spec: comma- - // separated `key=value` pairs, values optionally URL-encoded. This - // is how managed backends (Honeycomb, Grafana Cloud, New Relic) - // receive their API keys without needing a vendor-specific code - // path — flip the secret, redeploy, traces land in the new place. const otlpHeaders = parseOtlpHeadersEnv(process.env.OTEL_EXPORTER_OTLP_HEADERS || '') - - // The @opentelemetry/exporter-trace-otlp-http exporter treats the - // `url` option as the complete POST target and does NOT append the - // `/v1/traces` signal path. The Go SDK, by contrast, does append - // it when only a host is given. Normalize here so operators can - // set the same `OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io` - // for both services and have it Just Work. const exporterUrl = normalizeOtlpTracesUrl(telemetryConfig.endpoint) const exporter = new OTLPTraceExporter({ @@ -333,8 +224,7 @@ async function initializeOpenTelemetry() { keepAlive: false, }) - // Surface export failures in the Sim log instead of letting - // BatchSpanProcessor silently drop them. + // Surface export failures (BatchSpanProcessor swallows them otherwise). 
const origExport = exporter.export.bind(exporter) exporter.export = (spans, resultCallback) => { origExport(spans, (result) => { @@ -358,29 +248,16 @@ async function initializeOpenTelemetry() { exportTimeoutMillis: telemetryConfig.batchSettings.exportTimeoutMillis, }) - // service.instance.id identifies this specific process within the - // shared `mothership` service. Jaeger's clock-skew adjuster groups - // spans by (service, instance) — without a unique instance per - // origin, Sim and Go spans fall into the same group, Jaeger sees - // multi-second cross-machine clock drift within one group, and its - // adjuster emits spurious "parent is not in the trace; skipping - // clock skew adjustment" warnings on every cross-process child. - // Using the short slug (`sim` / `go`) keeps the instance id as - // `mothership-sim` / `mothership-go` — already scoped by - // `service.name = "mothership"` as the container. The longer - // `mothership.origin = "sim-mothership"` value does the - // disambiguation at the attribute level. + // Unique instance id per origin keeps Jaeger's clock-skew adjuster + // from grouping Sim+Go spans together (they'd see multi-second + // drift as intra-service and emit spurious warnings). const serviceInstanceId = `${telemetryConfig.serviceName}-${SERVICE_INSTANCE_SLUG}` const resource = defaultResource().merge( resourceFromAttributes({ [ATTR_SERVICE_NAME]: telemetryConfig.serviceName, [ATTR_SERVICE_VERSION]: telemetryConfig.serviceVersion, - // Explicit OTel env var wins; fall back to `DEPLOYMENT_ENVIRONMENT` - // for alt spellings; finally fall back to `NODE_ENV` so local dev - // (which rarely sets the otel vars) still produces a reasonable - // label. Matches the Go side's `resourceEnvFromEnv()` so Sim and - // Go always tag the same `deployment.environment` value for the - // same deploy. + // OTEL_ → DEPLOYMENT_ENVIRONMENT → NODE_ENV; matches Go's + // `resourceEnvFromEnv()` so both halves tag the same value. 
[ATTR_DEPLOYMENT_ENVIRONMENT]: process.env.OTEL_DEPLOYMENT_ENVIRONMENT || process.env.DEPLOYMENT_ENVIRONMENT || @@ -395,16 +272,6 @@ async function initializeOpenTelemetry() { }) ) - // Sampling ratio resolution, in priority order: - // 1. `TELEMETRY_SAMPLING_RATIO` (our explicit, matches Go side) - // 2. `OTEL_TRACES_SAMPLER_ARG` (OTel spec env var) - // 3. 1.0 for local endpoints (so dev traces are deterministic) - // 4. 1.0 otherwise (production wants every mothership request — - // retention happens at the backend) - // - // `1.0` is the right default for mothership: every request is - // support-critical and we rely on the backend's retention (1 day - // in prod) to cap storage, not upstream sampling. const isLocalEndpoint = /localhost|127\.0\.0\.1/i.test(telemetryConfig.endpoint) const samplingRatio = resolveSamplingRatio(isLocalEndpoint) const rootRatioSampler = new TraceIdRatioBasedSampler(samplingRatio) @@ -416,9 +283,7 @@ async function initializeOpenTelemetry() { origin: MOTHERSHIP_ORIGIN, }) - // Order matters: the origin-prefix processor must run BEFORE the batch - // processor so the renamed span and the mothership.origin attribute are - // captured on export. + // Origin-prefix must run before batch so the rename/attr is captured. 
const spanProcessors: SpanProcessor[] = [new MothershipOriginSpanProcessor(), batchProcessor] const sdk = new NodeSDK({ diff --git a/apps/sim/lib/copilot/async-runs/repository.ts b/apps/sim/lib/copilot/async-runs/repository.ts index ffcabd197cd..ddbe18bb61a 100644 --- a/apps/sim/lib/copilot/async-runs/repository.ts +++ b/apps/sim/lib/copilot/async-runs/repository.ts @@ -1,4 +1,4 @@ -import { SpanStatusCode, trace } from '@opentelemetry/api' +import { trace } from '@opentelemetry/api' import { db } from '@sim/db' import { type CopilotAsyncToolStatus, @@ -10,6 +10,8 @@ import { import { createLogger } from '@sim/logger' import { and, desc, eq, inArray, isNull } from 'drizzle-orm' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { markSpanForError } from '@/lib/copilot/request/otel' import { ASYNC_TOOL_STATUS, type AsyncCompletionData, @@ -23,11 +25,9 @@ const logger = createLogger('CopilotAsyncRunsRepo') // can evaluate modules before instrumentation-node.ts finishes). const getAsyncRunsTracer = () => trace.getTracer('sim-copilot-async-runs', '1.0.0') -/** - * withDbSpan wraps an async DB operation in a client-kind OTel span with - * canonical `db.*` attributes so every async-runs call is visible in traces - * alongside the owning request. - */ +// Wrap an async DB op in a client-kind span with canonical `db.*` attrs. +// Cancellation is routed through `markSpanForError` so aborts record the +// exception event but don't paint spans red. async function withDbSpan( name: string, op: string, @@ -44,32 +44,15 @@ async function withDbSpan( }, }) try { - const result = await fn() - return result + return await fn() } catch (error) { - // AbortError / cancellation is a control-flow outcome, not a DB - // failure. 
Record the exception event but skip `codes.ERROR` so - // the trace doesn't show red spans for every aborted request - // that happened to have an in-flight async-runs query. - span.recordException(error instanceof Error ? error : new Error(String(error))) - if (!isAbortError(error)) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - } + markSpanForError(span, error) throw error } finally { span.end() } } -function isAbortError(err: unknown): boolean { - if (err == null || typeof err !== 'object') return false - const e = err as { name?: unknown; code?: unknown } - return e.name === 'AbortError' || e.code === 'ABORT_ERR' -} - export interface CreateRunSegmentInput { id?: string executionId: string @@ -88,19 +71,19 @@ export interface CreateRunSegmentInput { export async function createRunSegment(input: CreateRunSegmentInput) { return withDbSpan( - 'copilot.async_runs.create_run_segment', + TraceSpan.CopilotAsyncRunsCreateRunSegment, 'INSERT', 'copilot_runs', { - 'copilot.execution_id': input.executionId, - 'copilot.chat_id': input.chatId, - 'copilot.stream_id': input.streamId, - 'copilot.user_id': input.userId, - 'copilot.run.parent_id': input.parentRunId ?? undefined, - 'copilot.run.agent': input.agent ?? undefined, - 'copilot.run.model': input.model ?? undefined, - 'copilot.run.provider': input.provider ?? undefined, - 'copilot.run.status': input.status ?? 'active', + [TraceAttr.CopilotExecutionId]: input.executionId, + [TraceAttr.ChatId]: input.chatId, + [TraceAttr.StreamId]: input.streamId, + [TraceAttr.UserId]: input.userId, + [TraceAttr.CopilotRunParentId]: input.parentRunId ?? undefined, + [TraceAttr.CopilotRunAgent]: input.agent ?? undefined, + [TraceAttr.CopilotRunModel]: input.model ?? undefined, + [TraceAttr.CopilotRunProvider]: input.provider ?? undefined, + [TraceAttr.CopilotRunStatus]: input.status ?? 
'active', }, async () => { const [run] = await db @@ -136,14 +119,14 @@ export async function updateRunStatus( } = {} ) { return withDbSpan( - 'copilot.async_runs.update_run_status', + TraceSpan.CopilotAsyncRunsUpdateRunStatus, 'UPDATE', 'copilot_runs', { - 'copilot.run.id': runId, - 'copilot.run.status': status, - 'copilot.run.has_error': !!updates.error, - 'copilot.run.has_completed_at': !!updates.completedAt, + [TraceAttr.RunId]: runId, + [TraceAttr.CopilotRunStatus]: status, + [TraceAttr.CopilotRunHasError]: !!updates.error, + [TraceAttr.CopilotRunHasCompletedAt]: !!updates.completedAt, }, async () => { const [run] = await db @@ -164,10 +147,10 @@ export async function updateRunStatus( export async function getLatestRunForExecution(executionId: string) { return withDbSpan( - 'copilot.async_runs.get_latest_for_execution', + TraceSpan.CopilotAsyncRunsGetLatestForExecution, 'SELECT', 'copilot_runs', - { 'copilot.execution_id': executionId }, + { [TraceAttr.CopilotExecutionId]: executionId }, async () => { const [run] = await db .select() @@ -180,20 +163,8 @@ export async function getLatestRunForExecution(executionId: string) { ) } -/** - * Deliberately UN-instrumented with OTel spans. Called from a 250ms - * poll loop in `app/api/copilot/chat/stream/route.ts` during every - * resume SSE connection — at 4 Hz for the whole lifetime of the - * connection, emitting a span per poll blew up long traces with - * hundreds of noop DB spans (observed ~240 spans/minute during - * reproduction). - * - * If we ever need visibility into this query's latency, add a Prom - * histogram (aggregates cleanly) rather than per-call spans at 4 Hz. - * The raw query is also fired once-off from several non-polling call - * sites; those get accurate DB latency from the request-level - * postgres instrumentation lower down the stack. - */ +// Un-instrumented: called from a 4 Hz resume poll; per-call spans +// swamped traces. Use Prom histograms if latency visibility is needed. 
export async function getLatestRunForStream(streamId: string, userId?: string) { const conditions = userId ? and(eq(copilotRuns.streamId, streamId), eq(copilotRuns.userId, userId)) @@ -209,10 +180,10 @@ export async function getLatestRunForStream(streamId: string, userId?: string) { export async function getRunSegment(runId: string) { return withDbSpan( - 'copilot.async_runs.get_run_segment', + TraceSpan.CopilotAsyncRunsGetRunSegment, 'SELECT', 'copilot_runs', - { 'copilot.run.id': runId }, + { [TraceAttr.RunId]: runId }, async () => { const [run] = await db.select().from(copilotRuns).where(eq(copilotRuns.id, runId)).limit(1) return run ?? null @@ -228,12 +199,12 @@ export async function createRunCheckpoint(input: { providerRequest: Record }) { return withDbSpan( - 'copilot.async_runs.create_run_checkpoint', + TraceSpan.CopilotAsyncRunsCreateRunCheckpoint, 'INSERT', 'copilot_run_checkpoints', { - 'copilot.run.id': input.runId, - 'copilot.checkpoint.pending_tool_call_id': input.pendingToolCallId, + [TraceAttr.RunId]: input.runId, + [TraceAttr.CopilotCheckpointPendingToolCallId]: input.pendingToolCallId, }, async () => { const [checkpoint] = await db @@ -261,14 +232,14 @@ export async function upsertAsyncToolCall(input: { status?: CopilotAsyncToolStatus }) { return withDbSpan( - 'copilot.async_runs.upsert_async_tool_call', + TraceSpan.CopilotAsyncRunsUpsertAsyncToolCall, 'UPSERT', 'copilot_async_tool_calls', { - 'tool.call_id': input.toolCallId, - 'tool.name': input.toolName, - 'copilot.async_tool.status': input.status ?? 'pending', - 'copilot.run.id': input.runId ?? undefined, + [TraceAttr.ToolCallId]: input.toolCallId, + [TraceAttr.ToolName]: input.toolName, + [TraceAttr.CopilotAsyncToolStatus]: input.status ?? 'pending', + [TraceAttr.RunId]: input.runId ?? 
undefined, }, async () => { const existing = await getAsyncToolCall(input.toolCallId) @@ -328,10 +299,10 @@ export async function upsertAsyncToolCall(input: { export async function getAsyncToolCall(toolCallId: string) { return withDbSpan( - 'copilot.async_runs.get_async_tool_call', + TraceSpan.CopilotAsyncRunsGetAsyncToolCall, 'SELECT', 'copilot_async_tool_calls', - { 'tool.call_id': toolCallId }, + { [TraceAttr.ToolCallId]: toolCallId }, async () => { const [row] = await db .select() @@ -355,14 +326,14 @@ export async function markAsyncToolStatus( } = {} ) { return withDbSpan( - 'copilot.async_runs.mark_async_tool_status', + TraceSpan.CopilotAsyncRunsMarkAsyncToolStatus, 'UPDATE', 'copilot_async_tool_calls', { - 'tool.call_id': toolCallId, - 'copilot.async_tool.status': status, - 'copilot.async_tool.has_error': !!updates.error, - 'copilot.async_tool.claimed_by': updates.claimedBy ?? undefined, + [TraceAttr.ToolCallId]: toolCallId, + [TraceAttr.CopilotAsyncToolStatus]: status, + [TraceAttr.CopilotAsyncToolHasError]: !!updates.error, + [TraceAttr.CopilotAsyncToolClaimedBy]: updates.claimedBy ?? 
undefined, }, async () => { const claimedAt = @@ -433,10 +404,10 @@ export async function markAsyncToolDelivered(toolCallId: string) { export async function listAsyncToolCallsForRun(runId: string) { return withDbSpan( - 'copilot.async_runs.list_for_run', + TraceSpan.CopilotAsyncRunsListForRun, 'SELECT', 'copilot_async_tool_calls', - { 'copilot.run.id': runId }, + { [TraceAttr.RunId]: runId }, async () => db .select() @@ -449,10 +420,10 @@ export async function listAsyncToolCallsForRun(runId: string) { export async function getAsyncToolCalls(toolCallIds: string[]) { if (toolCallIds.length === 0) return [] return withDbSpan( - 'copilot.async_runs.get_many', + TraceSpan.CopilotAsyncRunsGetMany, 'SELECT', 'copilot_async_tool_calls', - { 'copilot.async_tool.ids_count': toolCallIds.length }, + { [TraceAttr.CopilotAsyncToolIdsCount]: toolCallIds.length }, async () => db .select() @@ -463,12 +434,12 @@ export async function getAsyncToolCalls(toolCallIds: string[]) { export async function claimCompletedAsyncToolCall(toolCallId: string, workerId: string) { return withDbSpan( - 'copilot.async_runs.claim_completed', + TraceSpan.CopilotAsyncRunsClaimCompleted, 'UPDATE', 'copilot_async_tool_calls', { - 'tool.call_id': toolCallId, - 'copilot.async_tool.worker_id': workerId, + [TraceAttr.ToolCallId]: toolCallId, + [TraceAttr.CopilotAsyncToolWorkerId]: workerId, }, async () => { const [row] = await db @@ -493,12 +464,12 @@ export async function claimCompletedAsyncToolCall(toolCallId: string, workerId: export async function releaseCompletedAsyncToolClaim(toolCallId: string, workerId: string) { return withDbSpan( - 'copilot.async_runs.release_claim', + TraceSpan.CopilotAsyncRunsReleaseClaim, 'UPDATE', 'copilot_async_tool_calls', { - 'tool.call_id': toolCallId, - 'copilot.async_tool.worker_id': workerId, + [TraceAttr.ToolCallId]: toolCallId, + [TraceAttr.CopilotAsyncToolWorkerId]: workerId, }, async () => { const [row] = await db diff --git a/apps/sim/lib/copilot/chat/post.ts 
b/apps/sim/lib/copilot/chat/post.ts index 505c6d1d9ba..bc78deb8888 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -19,6 +19,10 @@ import { import { finalizeAssistantTurn } from '@/lib/copilot/chat/terminal-state' import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context' import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants' +import { + CopilotChatPersistOutcome, + CopilotTransport, +} from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { createBadRequestResponse, createUnauthorizedResponse } from '@/lib/copilot/request/http' @@ -282,14 +286,14 @@ async function persistUserMessage(params: { return withCopilotSpan( TraceSpan.CopilotChatPersistUserMessage, { - 'db.system': 'postgresql', - 'db.sql.table': 'copilot_chats', - 'chat.id': chatId, - 'chat.user_message_id': userMessageId, - 'chat.message_bytes': message.length, - 'chat.file_attachment_count': fileAttachments?.length ?? 0, - 'chat.context_count': contexts?.length ?? 0, - ...(workspaceId ? { 'workspace.id': workspaceId } : {}), + [TraceAttr.DbSystem]: 'postgresql', + [TraceAttr.DbSqlTable]: 'copilot_chats', + [TraceAttr.ChatId]: chatId, + [TraceAttr.ChatUserMessageId]: userMessageId, + [TraceAttr.ChatMessageBytes]: message.length, + [TraceAttr.ChatFileAttachmentCount]: fileAttachments?.length ?? 0, + [TraceAttr.ChatContextCount]: contexts?.length ?? 0, + ...(workspaceId ? { [TraceAttr.WorkspaceId]: workspaceId } : {}), }, async (span) => { const userMsg = buildPersistedUserMessage({ @@ -311,7 +315,9 @@ async function persistUserMessage(params: { const messagesAfter = Array.isArray(updated?.messages) ? updated.messages : undefined span.setAttributes({ - [TraceAttr.ChatPersistOutcome]: updated ? 'appended' : 'chat_not_found', + [TraceAttr.ChatPersistOutcome]: updated + ? 
CopilotChatPersistOutcome.Appended + : CopilotChatPersistOutcome.ChatNotFound, [TraceAttr.ChatMessagesAfter]: messagesAfter?.length ?? 0, }) @@ -401,21 +407,10 @@ function buildOnComplete(params: { if (!chatId) return - // One-writer rule on cancel paths: `/api/copilot/chat/stop` is the - // single DB writer when the user hit Stop (or the client - // disconnected). It writes the partial assistant message AND - // clears `conversationId` in the same UPDATE, filtered on - // `conversationId = streamId`. If `finalizeAssistantTurn` races - // ahead here and clears `conversationId` first, stop's UPDATE - // matches zero rows and the partial content silently vanishes on - // chat refetch (repro: trace c18de3e2 → `copilot.stop.outcome = - // 'no_matching_row'`). - // - // So: on cancel, skip finalize here and let /chat/stop run the - // terminal write. On real backend errors (`!success` without - // `cancelled`) we DO want to finalize — it clears the stream - // marker so the chat isn't stuck with a non-null `conversationId` - // and blocking future messages. + // On cancel, /chat/stop is the sole DB writer — it persists + // partial content AND clears conversationId in one UPDATE. If we + // finalize here first the filter misses and content vanishes. + // Real errors still finalize so the stream marker clears. if (result.cancelled) return try { @@ -616,15 +611,9 @@ export async function handleUnifiedChatPost(req: NextRequest) { // Errors thrown from the handler before the stream starts are // finished here in the catch below. let otelRoot: ReturnType | undefined - // `requestId` is the canonical logical ID for this HTTP request — - // same value that flows into `request.id`/`sim.request_id` span - // attributes, the persisted `msg.requestId`, and eventually the - // Grafana trace-ID search box. Derived from otelRoot.requestId (= the - // OTel trace ID of the root span) as soon as that's created. 
Stays - // empty only in the narrow window before otelRoot is set — errors in - // that window can't be correlated to any trace anyway, and their log - // line carries the error message + stack which is the actually - // useful info. + // Canonical logical ID; assigned from otelRoot.requestId (the OTel + // trace ID) as soon as startCopilotOtelRoot runs. Empty only in the + // narrow pre-otelRoot window where errors don't correlate anyway. let requestId = '' const executionId = crypto.randomUUID() const runId = crypto.randomUUID() @@ -641,44 +630,28 @@ export async function handleUnifiedChatPost(req: NextRequest) { userMessageId = body.userMessageId || crypto.randomUUID() otelRoot = startCopilotOtelRoot({ - // No explicit requestId — startCopilotOtelRoot derives it from - // the span's OTel trace ID so `msg.requestId` on the UI side - // ends up being the same value Grafana uses. See the scope - // doc-comment and the call site for why this is the desired - // direction of the unification. streamId: userMessageId, executionId, runId, - transport: 'stream', - // Truncated prompt for the dashboard "user message" column. - // Unconditional (no PII env gate) — a preview snippet is - // cheap and widely useful; full content is gated separately - // by setInputMessages above. + transport: CopilotTransport.Stream, userMessagePreview: body.message, }) - // Promote the OTel-derived ID to the handler-level `requestId` so - // every downstream consumer (logs, orchestrator, onComplete, - // onError, persisted assistant message) uses the same value. if (otelRoot.requestId) { requestId = otelRoot.requestId } - // Emit `gen_ai.input.messages` on the root agent span for OTel - // GenAI spec compliance (Honeycomb's Gen AI view keys off this). - // Gated on OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT - // internally — safe to always call. + // `setInputMessages` is internally gated on + // OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT; safe to call. 
otelRoot.setInputMessages({ userMessage: body.message }) - // Wrap the rest of the handler so every nested withCopilotSpan / - // withDbSpan (persistUserMessage, createRunSegment, resolveBranch DB - // hits) attaches to the root via AsyncLocalStorage. Before this - // wrapper those spans became orphan roots and each showed up as a - // separate trace in Jaeger. - return await otelContextApi.with(otelRoot.context, async () => { + // Wrap the rest of the handler so nested spans attach to the + // root via AsyncLocalStorage (otherwise they orphan into new traces). + const activeOtelRoot = otelRoot + return await otelContextApi.with(activeOtelRoot.context, async () => { const branch = await withCopilotSpan( TraceSpan.CopilotChatResolveBranch, { - 'branch.workflow_id': body.workflowId ?? '', - 'branch.workspace_id': body.workspaceId ?? '', + [TraceAttr.WorkflowId]: body.workflowId ?? '', + [TraceAttr.WorkspaceId]: body.workspaceId ?? '', }, () => resolveBranch({ @@ -690,7 +663,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { mode: body.mode, provider: body.provider, }), - otelRoot!.context + activeOtelRoot.context ) if (branch instanceof NextResponse) { return branch @@ -705,8 +678,8 @@ export async function handleUnifiedChatPost(req: NextRequest) { const chatResult = await withCopilotSpan( TraceSpan.CopilotChatResolveOrCreateChat, { - 'chat.preexisting': !!body.chatId, - 'chat.create_new': !!body.createNewChat, + [TraceAttr.ChatPreexisting]: !!body.chatId, + [TraceAttr.CopilotChatIsNew]: !!body.createNewChat, }, () => resolveOrCreateChat({ @@ -717,7 +690,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { model: branch.titleModel, type: branch.kind === 'workflow' ? 'copilot' : 'mothership', }), - otelRoot!.context + activeOtelRoot.context ) currentChat = chatResult.chat actualChatId = chatResult.chatId || body.chatId @@ -764,7 +737,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { // wait are all known. 
This turns dashboard slicing by // `copilot.surface` / `copilot.mode` / `copilot.interrupted_prior_stream` // into a simple TraceQL filter. - otelRoot!.setRequestShape({ + activeOtelRoot.setRequestShape({ branchKind: branch.kind, mode: body.mode, model: body.model, @@ -797,16 +770,16 @@ export async function handleUnifiedChatPost(req: NextRequest) { branch.kind === 'workspace' ? withCopilotSpan( TraceSpan.CopilotChatBuildWorkspaceContext, - { 'workspace.id': branch.workspaceId }, + { [TraceAttr.WorkspaceId]: branch.workspaceId }, () => generateWorkspaceContext(branch.workspaceId, authenticatedUserId), - otelRoot!.context + activeOtelRoot.context ) : Promise.resolve(undefined) const agentContextsPromise = withCopilotSpan( TraceSpan.CopilotChatResolveAgentContexts, { - 'contexts.count': normalizedContexts.length, - 'attachments.count': body.resourceAttachments?.length ?? 0, + [TraceAttr.CopilotContextsCount]: normalizedContexts.length, + [TraceAttr.CopilotResourceAttachmentsCount]: body.resourceAttachments?.length ?? 
0, }, () => resolveAgentContexts({ @@ -818,7 +791,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { chatId: actualChatId, requestId, }), - otelRoot!.context + activeOtelRoot.context ) const persistedMessagesPromise = persistUserMessage({ chatId: actualChatId, @@ -828,11 +801,11 @@ export async function handleUnifiedChatPost(req: NextRequest) { contexts: normalizedContexts, workspaceId, notifyWorkspaceStatus: branch.notifyWorkspaceStatus, - parentOtelContext: otelRoot!.context, + parentOtelContext: activeOtelRoot.context, }) const executionContextPromise = withCopilotSpan( TraceSpan.CopilotChatBuildExecutionContext, - { 'branch.kind': branch.kind }, + { [TraceAttr.CopilotBranchKind]: branch.kind }, () => branch.buildExecutionContext({ userId: authenticatedUserId, @@ -840,7 +813,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { userTimezone: body.userTimezone, messageId: userMessageId, }), - otelRoot!.context + activeOtelRoot.context ) const [agentContexts, userPermission, workspaceContext, persistedMessages, executionContext] = @@ -867,9 +840,9 @@ export async function handleUnifiedChatPost(req: NextRequest) { const requestPayload = await withCopilotSpan( TraceSpan.CopilotChatBuildPayload, { - 'branch.kind': branch.kind, - 'attachments.count': body.fileAttachments?.length ?? 0, - 'contexts.count': normalizedContexts.length, + [TraceAttr.CopilotBranchKind]: branch.kind, + [TraceAttr.CopilotFileAttachmentsCount]: body.fileAttachments?.length ?? 
0, + [TraceAttr.CopilotContextsCount]: normalizedContexts.length, }, () => branch.kind === 'workflow' @@ -902,14 +875,14 @@ export async function handleUnifiedChatPost(req: NextRequest) { userTimezone: body.userTimezone, workspaceContext, }), - otelRoot!.context + activeOtelRoot.context ) if (actualChatId) { - otelRoot!.span.setAttribute(TraceAttr.ChatId, actualChatId) + activeOtelRoot.span.setAttribute(TraceAttr.ChatId, actualChatId) } if (workspaceId) { - otelRoot!.span.setAttribute(TraceAttr.WorkspaceId, workspaceId) + activeOtelRoot.span.setAttribute(TraceAttr.WorkspaceId, workspaceId) } const stream = createSSEStream({ @@ -926,7 +899,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { ...(branch.titleProvider ? { titleProvider: branch.titleProvider } : {}), requestId, workspaceId, - otelRoot: otelRoot!, + otelRoot: activeOtelRoot, orchestrateOptions: { userId: authenticatedUserId, ...(branch.kind === 'workflow' ? { workflowId: branch.workflowId } : {}), @@ -962,7 +935,7 @@ export async function handleUnifiedChatPost(req: NextRequest) { // all side-channel work on this request appear as child spans // of this same trace in Tempo instead of disconnected roots. // W3C traceparent format: `00---`. - const rootCtx = otelRoot!.span.spanContext() + const rootCtx = activeOtelRoot.span.spanContext() const rootTraceparent = `00-${rootCtx.traceId}-${rootCtx.spanId}-${ (rootCtx.traceFlags & 0x1) === 0x1 ? 
'01' : '00' }` diff --git a/apps/sim/lib/copilot/chat/terminal-state.ts b/apps/sim/lib/copilot/chat/terminal-state.ts index f408ab80feb..f0f43cb6bb0 100644 --- a/apps/sim/lib/copilot/chat/terminal-state.ts +++ b/apps/sim/lib/copilot/chat/terminal-state.ts @@ -26,11 +26,11 @@ export async function finalizeAssistantTurn({ return withCopilotSpan( TraceSpan.CopilotChatFinalizeAssistantTurn, { - 'db.system': 'postgresql', - 'db.sql.table': 'copilot_chats', - 'chat.id': chatId, - 'chat.user_message_id': userMessageId, - 'chat.has_assistant_message': !!assistantMessage, + [TraceAttr.DbSystem]: 'postgresql', + [TraceAttr.DbSqlTable]: 'copilot_chats', + [TraceAttr.ChatId]: chatId, + [TraceAttr.ChatUserMessageId]: userMessageId, + [TraceAttr.ChatHasAssistantMessage]: !!assistantMessage, }, async (span) => { const [row] = await db diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index 1718cfc1d9d..475c659f4e5 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -34,9 +34,6 @@ export const STREAM_STORAGE_KEY = 'copilot_active_stream' /** POST — send a chat message through the unified mothership chat surface. */ export const MOTHERSHIP_CHAT_API_PATH = '/api/mothership/chat' -/** Backwards-compatible alias while remaining callers migrate. */ -export const COPILOT_CHAT_API_PATH = MOTHERSHIP_CHAT_API_PATH - /** POST — confirm or reject a tool call. 
*/ export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm' diff --git a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts index 2040cbc8636..0172fa14d83 100644 --- a/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attribute-values-v1.ts @@ -272,6 +272,7 @@ export type CopilotVfsOutcomeValue = (typeof CopilotVfsOutcome)[CopilotVfsOutcom export const CopilotVfsReadOutcome = { BinaryPlaceholder: 'binary_placeholder', DocumentParsed: 'document_parsed', + DocumentTooLarge: 'document_too_large', ImagePrepared: 'image_prepared', ImageTooLarge: 'image_too_large', ParseFailed: 'parse_failed', diff --git a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts index 973e3a0417d..a64c897a001 100644 --- a/apps/sim/lib/copilot/generated/trace-attributes-v1.ts +++ b/apps/sim/lib/copilot/generated/trace-attributes-v1.ts @@ -160,9 +160,11 @@ export const TraceAttr = { CopilotAsyncToolWorkerId: 'copilot.async_tool.worker_id', CopilotBranchKind: 'copilot.branch.kind', CopilotChatIsNew: 'copilot.chat.is_new', + CopilotCheckpointPendingToolCallId: 'copilot.checkpoint.pending_tool_call_id', CopilotCommandsCount: 'copilot.commands.count', CopilotConfirmOutcome: 'copilot.confirm.outcome', CopilotContextsCount: 'copilot.contexts.count', + CopilotExecutionId: 'copilot.execution.id', CopilotFileAttachmentsCount: 'copilot.file_attachments.count', CopilotFinalizeOutcome: 'copilot.finalize.outcome', CopilotInterruptedPriorStream: 'copilot.interrupted_prior_stream', @@ -245,6 +247,10 @@ export const TraceAttr = { CopilotVfsResizeAttempts: 'copilot.vfs.resize.attempts', CopilotVfsResizeChosenDimension: 'copilot.vfs.resize.chosen_dimension', CopilotVfsResizeChosenQuality: 'copilot.vfs.resize.chosen_quality', + CopilotVfsResizeDimension: 'copilot.vfs.resize.dimension', + CopilotVfsResizeFitsBudget: 
'copilot.vfs.resize.fits_budget', + CopilotVfsResizeOutputBytes: 'copilot.vfs.resize.output_bytes', + CopilotVfsResizeQuality: 'copilot.vfs.resize.quality', CopilotVfsResized: 'copilot.vfs.resized', CopilotVfsSharpLoadFailed: 'copilot.vfs.sharp.load_failed', CostDefaultCost: 'cost.default_cost', @@ -305,7 +311,6 @@ export const TraceAttr = { GenAiUsageInputTokens: 'gen_ai.usage.input_tokens', GenAiUsageOutputTokens: 'gen_ai.usage.output_tokens', GenAiUsageTotalTokens: 'gen_ai.usage.total_tokens', - GenAiWorkflowExecutionId: 'gen_ai.workflow.execution_id', GenAiWorkflowId: 'gen_ai.workflow.id', GenAiWorkflowName: 'gen_ai.workflow.name', HostedKeyEnvVar: 'hosted_key.env_var', @@ -318,6 +323,7 @@ export const TraceAttr = { HttpResponseContentLength: 'http.response.content_length', HttpResponseHeadersMs: 'http.response.headers_ms', HttpResponseTotalMs: 'http.response.total_ms', + HttpRoute: 'http.route', HttpServerDurationMs: 'http.server.duration_ms', HttpStatusCode: 'http.status_code', HttpTarget: 'http.target', @@ -480,7 +486,6 @@ export const TraceAttr = { WorkflowCreatedId: 'workflow.created_id', WorkflowDurationMs: 'workflow.duration_ms', WorkflowEdgesCount: 'workflow.edges_count', - WorkflowExecutionId: 'workflow.execution_id', WorkflowHasFolder: 'workflow.has_folder', WorkflowHasWorkspace: 'workflow.has_workspace', WorkflowId: 'workflow.id', @@ -643,9 +648,11 @@ export const TraceAttrValues: readonly TraceAttrValue[] = [ 'copilot.async_tool.worker_id', 'copilot.branch.kind', 'copilot.chat.is_new', + 'copilot.checkpoint.pending_tool_call_id', 'copilot.commands.count', 'copilot.confirm.outcome', 'copilot.contexts.count', + 'copilot.execution.id', 'copilot.file_attachments.count', 'copilot.finalize.outcome', 'copilot.interrupted_prior_stream', @@ -728,6 +735,10 @@ export const TraceAttrValues: readonly TraceAttrValue[] = [ 'copilot.vfs.resize.attempts', 'copilot.vfs.resize.chosen_dimension', 'copilot.vfs.resize.chosen_quality', + 'copilot.vfs.resize.dimension', + 
'copilot.vfs.resize.fits_budget', + 'copilot.vfs.resize.output_bytes', + 'copilot.vfs.resize.quality', 'copilot.vfs.resized', 'copilot.vfs.sharp.load_failed', 'cost.default_cost', @@ -788,7 +799,6 @@ export const TraceAttrValues: readonly TraceAttrValue[] = [ 'gen_ai.usage.input_tokens', 'gen_ai.usage.output_tokens', 'gen_ai.usage.total_tokens', - 'gen_ai.workflow.execution_id', 'gen_ai.workflow.id', 'gen_ai.workflow.name', 'hosted_key.env_var', @@ -801,6 +811,7 @@ export const TraceAttrValues: readonly TraceAttrValue[] = [ 'http.response.content_length', 'http.response.headers_ms', 'http.response.total_ms', + 'http.route', 'http.server.duration_ms', 'http.status_code', 'http.target', @@ -963,7 +974,6 @@ export const TraceAttrValues: readonly TraceAttrValue[] = [ 'workflow.created_id', 'workflow.duration_ms', 'workflow.edges_count', - 'workflow.execution_id', 'workflow.has_folder', 'workflow.has_workspace', 'workflow.id', diff --git a/apps/sim/lib/copilot/request/go/fetch.ts b/apps/sim/lib/copilot/request/go/fetch.ts index 529b05d3a84..3eb5deda044 100644 --- a/apps/sim/lib/copilot/request/go/fetch.ts +++ b/apps/sim/lib/copilot/request/go/fetch.ts @@ -2,6 +2,7 @@ import { type Context, context, SpanStatusCode, trace } from '@opentelemetry/api import { CopilotLeg } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { traceHeaders } from '@/lib/copilot/request/go/propagation' +import { markSpanForError } from '@/lib/copilot/request/otel' // Lazy tracer resolution: module-level `trace.getTracer()` can be evaluated // before `instrumentation-node.ts` installs the TracerProvider under @@ -9,7 +10,7 @@ import { traceHeaders } from '@/lib/copilot/request/go/propagation' // every outbound Sim → Go span. Resolving per-call avoids the race. 
const getTracer = () => trace.getTracer('sim-copilot-http', '1.0.0') -export interface OutboundFetchOptions extends RequestInit { +interface OutboundFetchOptions extends RequestInit { otelContext?: Context spanName?: string operation?: string @@ -90,38 +91,13 @@ export async function fetchGo(url: string, options: OutboundFetchOptions = {}): return response } catch (error) { span.setAttribute(TraceAttr.HttpResponseHeadersMs, Math.round(performance.now() - start)) - // AbortError isn't a real failure — it's the caller (user Stop, - // orchestrator deadline, reader disconnect) asking the fetch to - // stop. Record the exception event so the trace still carries the - // forensic detail, but skip `codes.ERROR` so dashboards don't - // treat every abort as a 5xx-class incident. Mirrors the Go-side - // carve-out for `context.Canceled` in `StreamSpan.RecordError`. - span.recordException(error instanceof Error ? error : new Error(String(error))) - if (!isFetchAbortError(error)) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }) - } + markSpanForError(span, error) throw error } finally { span.end() } } -/** - * Matches every fetch-abort shape the Node/browser runtimes produce: - * DOMException `AbortError`, plain `Error` with `name === 'AbortError'`, - * and undici's `code === 'ABORT_ERR'`. Kept local to this module so - * there's no cross-cutting dependency; the canonical version lives in - * `lib/copilot/request/otel.ts` (`isCancellationError`). 
- */ -function isFetchAbortError(err: unknown): boolean { - if (err == null || typeof err !== 'object') return false - const e = err as { name?: unknown; code?: unknown } - return e.name === 'AbortError' || e.code === 'ABORT_ERR' -} - function safeParseUrl(url: string): URL | null { try { return new URL(url) diff --git a/apps/sim/lib/copilot/request/go/stream.ts b/apps/sim/lib/copilot/request/go/stream.ts index aa0f0df2cea..2326808b35f 100644 --- a/apps/sim/lib/copilot/request/go/stream.ts +++ b/apps/sim/lib/copilot/request/go/stream.ts @@ -168,7 +168,6 @@ export async function runStreamLoop( const bodyStart = performance.now() let firstEventMs: number | undefined let eventsReceived = 0 - let bytesReceived = 0 let endedOn = 'terminal' const reader = response.body.getReader() @@ -187,11 +186,6 @@ export async function runStreamLoop( firstEventMs = Math.round(performance.now() - bodyStart) } eventsReceived += 1 - try { - bytesReceived += JSON.stringify(raw ?? null).length - } catch { - // non-serializable event; skip byte accounting - } if (abortSignal?.aborted) { context.wasAborted = true return true @@ -355,7 +349,6 @@ export async function runStreamLoop( bodySpan.attributes = { ...(bodySpan.attributes ?? {}), eventsReceived, - bytesReceived, firstEventMs, endedOn, durationMs: bodyDurationMs, diff --git a/apps/sim/lib/copilot/request/handlers/tool.ts b/apps/sim/lib/copilot/request/handlers/tool.ts index 661335f53f0..23d3b7bed14 100644 --- a/apps/sim/lib/copilot/request/handlers/tool.ts +++ b/apps/sim/lib/copilot/request/handlers/tool.ts @@ -373,10 +373,10 @@ async function dispatchToolExecution( const pendingPromise = withCopilotSpan( TraceSpan.CopilotToolWaitForClientResult, { - 'tool.name': toolName, - 'tool.call_id': toolCallId, - 'tool.timeout_ms': options.timeout || STREAM_TIMEOUT_MS, - ...(context.runId ? 
{ 'run.id': context.runId } : {}), + [TraceAttr.ToolName]: toolName, + [TraceAttr.ToolCallId]: toolCallId, + [TraceAttr.ToolTimeoutMs]: options.timeout || STREAM_TIMEOUT_MS, + ...(context.runId ? { [TraceAttr.RunId]: context.runId } : {}), }, async (span) => { await upsertAsyncToolCall({ diff --git a/apps/sim/lib/copilot/request/http.ts b/apps/sim/lib/copilot/request/http.ts index 34eb28cf76a..902f0da662a 100644 --- a/apps/sim/lib/copilot/request/http.ts +++ b/apps/sim/lib/copilot/request/http.ts @@ -38,7 +38,7 @@ export function createRequestId(): string { return generateId() } -export function createShortRequestId(): string { +function createShortRequestId(): string { return generateRequestId() } diff --git a/apps/sim/lib/copilot/request/lifecycle/finalize.ts b/apps/sim/lib/copilot/request/lifecycle/finalize.ts index 3d0e5de00cd..fa087a4b7d6 100644 --- a/apps/sim/lib/copilot/request/lifecycle/finalize.ts +++ b/apps/sim/lib/copilot/request/lifecycle/finalize.ts @@ -9,27 +9,17 @@ import { type RequestTraceV1Outcome, RequestTraceV1Outcome as RequestTraceV1OutcomeConst, } from '@/lib/copilot/generated/request-trace-v1' +import { CopilotFinalizeOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import type { StreamWriter } from '@/lib/copilot/request/session' import type { OrchestratorResult } from '@/lib/copilot/request/types' const logger = createLogger('CopilotStreamFinalize') -// Lazy tracer resolution: see comment in lib/copilot/request/otel.ts. const getTracer = () => trace.getTracer('sim-copilot-finalize', '1.0.0') -/** - * Single finalization path for stream results. - * - * `outcome` is the classifier's resolved verdict from the caller — it - * encodes "was this cancelled, errored, or completed" WITHOUT relying - * on the raw `abortController.signal.aborted` boolean. 
That matters - * because a client can disconnect mid-stream without the abort - * controller ever firing (the SSE `cancel()` callback only sets - * `publisher.clientDisconnected`); the lifecycle classifies THAT as - * `cancelled` too, but a prior API passed `aborted: false` into this - * function, sending us down `handleError` and persisting an `error` - * terminal state + run status. Now the outcome is the source of truth. - */ +// Single finalization path. `outcome` is the caller's resolved verdict +// so we don't have to re-derive cancel vs error from raw signals. export async function finalizeStream( result: OrchestratorResult, publisher: StreamWriter, @@ -39,15 +29,15 @@ export async function finalizeStream( ): Promise { const spanOutcome = outcome === RequestTraceV1OutcomeConst.cancelled - ? 'aborted' + ? CopilotFinalizeOutcome.Aborted : outcome === RequestTraceV1OutcomeConst.success - ? 'success' - : 'error' - const span = getTracer().startSpan('copilot.finalize_stream', { + ? CopilotFinalizeOutcome.Success + : CopilotFinalizeOutcome.Error + const span = getTracer().startSpan(TraceSpan.CopilotFinalizeStream, { attributes: { [TraceAttr.CopilotFinalizeOutcome]: spanOutcome, - 'copilot.run.id': runId, - 'copilot.request.id': requestId, + [TraceAttr.RunId]: runId, + [TraceAttr.RequestId]: requestId, [TraceAttr.CopilotResultToolCalls]: result.toolCalls?.length ?? 0, [TraceAttr.CopilotResultContentBlocks]: result.contentBlocks?.length ?? 0, [TraceAttr.CopilotResultContentLength]: result.content?.length ?? 0, @@ -67,6 +57,11 @@ export async function finalizeStream( } else { await handleSuccess(publisher, runId, requestId) } + // Successful + cancelled paths fall through as status-unset → set + // OK so dashboards don't show "incomplete" for normal terminals. + if (outcome !== RequestTraceV1OutcomeConst.error) { + span.setStatus({ code: SpanStatusCode.OK }) + } } catch (error) { span.recordException(error instanceof Error ? 
error : new Error(String(error))) span.setStatus({ code: SpanStatusCode.ERROR, message: 'finalize threw' }) diff --git a/apps/sim/lib/copilot/request/lifecycle/headless.ts b/apps/sim/lib/copilot/request/lifecycle/headless.ts index 2196f8b108a..57a8e27852c 100644 --- a/apps/sim/lib/copilot/request/lifecycle/headless.ts +++ b/apps/sim/lib/copilot/request/lifecycle/headless.ts @@ -4,6 +4,7 @@ import { RequestTraceV1Outcome, RequestTraceV1SpanStatus, } from '@/lib/copilot/generated/request-trace-v1' +import { CopilotTransport } from '@/lib/copilot/generated/trace-attribute-values-v1' import type { CopilotLifecycleOptions } from '@/lib/copilot/request/lifecycle/run' import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run' import { withCopilotOtelContext } from '@/lib/copilot/request/otel' @@ -42,7 +43,7 @@ export async function runHeadlessCopilotLifecycle( workflowId: options.workflowId, executionId: options.executionId, runId: options.runId, - transport: 'headless', + transport: CopilotTransport.Headless, }, async (otelContext) => { try { diff --git a/apps/sim/lib/copilot/request/lifecycle/run.ts b/apps/sim/lib/copilot/request/lifecycle/run.ts index ae504a6e596..a664ec3eb0d 100644 --- a/apps/sim/lib/copilot/request/lifecycle/run.ts +++ b/apps/sim/lib/copilot/request/lifecycle/run.ts @@ -250,13 +250,6 @@ async function runCheckpointLoop( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), 'X-Client-Version': SIM_AGENT_VERSION, - // X-Sim-Request-ID header removed: Sim's logical request ID - // is now the OTel trace ID of the chat POST's root span, - // propagated to Go via the W3C `traceparent` header - // (injected by fetchGo below). Go's `RequestIdentity` picks - // the trace ID up from the extracted context automatically - // when no Sim-Request-ID header is present. Go keeps the - // reader around for back-compat with older Sim deploys. 
}, body: JSON.stringify(payload), }, diff --git a/apps/sim/lib/copilot/request/lifecycle/start.ts b/apps/sim/lib/copilot/request/lifecycle/start.ts index 40903a20fb4..0d2273a1e8a 100644 --- a/apps/sim/lib/copilot/request/lifecycle/start.ts +++ b/apps/sim/lib/copilot/request/lifecycle/start.ts @@ -16,6 +16,7 @@ import { import { CopilotRequestCancelReason, type CopilotRequestCancelReasonValue, + CopilotTransport, } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceEvent } from '@/lib/copilot/generated/trace-events-v1' @@ -65,11 +66,8 @@ export interface StreamingOrchestrationParams { workspaceId?: string orchestrateOptions: Omit /** - * Pre-started gen_ai.agent.execute root returned by - * `startCopilotOtelRoot`. When provided, this stream binds every nested - * span to that root and calls `finish()` on termination. When omitted, - * this function starts its own root internally (kept for back-compat - * with the headless path). + * Pre-started root; child spans bind to it and `finish()` fires on + * termination. Omit to let the stream start its own root (headless). */ otelRoot?: ReturnType } @@ -93,11 +91,7 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS otelRoot, } = params - // If the caller (POST handler) already started the gen_ai.agent.execute - // root so that pre-stream setup work (persistUserMessage, resource - // loads, etc.) could nest under it, reuse that root and finish it from - // our terminal code path via the idempotent `finish`. Otherwise start - // our own so the stream still gets a proper OTel trace. + // Reuse caller's root if provided; otherwise start our own. const activeOtelRoot = otelRoot ?? 
startCopilotOtelRoot({ @@ -108,7 +102,7 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS executionId, runId, streamId, - transport: 'stream', + transport: CopilotTransport.Stream, }) const abortController = new AbortController() @@ -116,30 +110,8 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS const publisher = new StreamWriter({ streamId, chatId, requestId }) - /** - * Classifies a cancelled outcome into one of the closed-vocabulary - * `CopilotRequestCancelReason` values, and records the result on the - * active OTel root span (attribute + event). - * - * Classification rules: - * - `signal.reason` is in the known explicit-stop set (see - * `AbortReason.*`) → `ExplicitStop`. - * - Otherwise, `publisher.clientDisconnected` → `ClientDisconnect`. - * - Otherwise → `Unknown`, which is a latent bug: the stream aborted - * with a reason we don't recognize and the client never dropped. - * We log an error with the raw reason and record it on the span so - * we can find whichever code path added a new `abort(...)` call - * without updating the contract. - * - * IMPORTANT: `publisher.clientDisconnected` alone is NOT a reliable - * discriminator. When the user clicks Stop, `abortActiveStream` - * fires `abortController.abort(AbortReason.UserStop)`, which closes - * the SSE stream, which causes the BROWSER to disconnect its SSE - * reader, which propagates back as `publisher.markDisconnected()`. - * So on an explicit Stop you observe BOTH the explicit reason AND - * `clientDisconnected=true`. The reason string is the source of - * truth for intent; the disconnect flag is only a fallback. - */ + // Classify cancel: signal.reason (explicit-stop set) wins, then + // clientDisconnected, else Unknown (latent contract bug — log it). 
const recordCancelled = (errorMessage?: string): CopilotRequestCancelReasonValue => { const rawReason = abortController.signal.reason let cancelReason: CopilotRequestCancelReasonValue @@ -163,11 +135,8 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS return String(rawReason) } })() - // Not user-facing. Signals a contract violation: a code path - // aborted the stream with a reason that isn't in the known set, - // and the client didn't disconnect either. Whoever sees this - // should add the new reason to `AbortReason` / `isExplicitStopReason` - // (if it's explicit) or extend the classifier. + // Contract violation: add the new reason to AbortReason / + // isExplicitStopReason or extend the classifier. logger.error(`[${requestId}] Stream cancelled with unknown abort reason`, { streamId, chatId, @@ -189,10 +158,8 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS async start(controller) { publisher.attach(controller) - // Re-enter the root OTel context. Node's AsyncLocalStorage does - // not survive the Next.js handler -> ReadableStream.start boundary, - // so nested `withCopilotSpan` / `withDbSpan` calls would otherwise - // orphan into new traces. + // Re-enter the root OTel context — ALS doesn't survive the + // Next handler → ReadableStream.start boundary. await otelContextApi.with(activeOtelRoot.context, async () => { const otelContext = activeOtelRoot.context let rootOutcome: CopilotLifecycleOutcome = RequestTraceV1Outcome.error @@ -315,7 +282,16 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS error: error instanceof Error ? error.message : 'Stream error', }) } - logger.error(`[${requestId}] Unexpected orchestration error:`, error) + // Demote to warn when the throw came from a user-initiated + // cancel — it isn't an "unexpected" failure then, and the + // error-level log pollutes alerting on normal Stop presses. 
+ const logFn = + outcome === RequestTraceV1Outcome.cancelled ? logger.warn : logger.error + logFn.call( + logger, + `[${requestId}] Orchestration ended with ${outcome}:`, + error + ) const syntheticResult = { success: false as const, @@ -367,7 +343,11 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS usage: lifecycleResult?.usage, cost: lifecycleResult?.cost, }) - reportTrace(trace, otelContext).catch(() => {}) + reportTrace(trace, otelContext).catch((err) => { + logger.warn(`[${requestId}] Failed to report trace`, { + error: err instanceof Error ? err.message : String(err), + }) + }) rootOutcome = outcome if (lifecycleResult?.usage) { activeOtelRoot.span.setAttributes({ @@ -400,22 +380,11 @@ export function createSSEStream(params: StreamingOrchestrationParams): ReadableS // in-flight `publisher.publish` calls silently no-op (prevents // enqueueing on a closed controller). // - // Intentionally does NOT fire the AbortController here. The - // abort controller is reserved for actual "abort this request" - // semantics (driven by `abortActiveStream()` on an explicit Stop - // or the Redis-marker poller for cross-node Stops). Firing it - // on browser disconnect means a successful stream that loses - // its reader at the last moment would get retroactively - // classified as aborted — which skips persisting the assistant - // message (see trace 707f2614 where the whole response - // disappeared after completion). - // - // Trade-off: on a true tab close, the orchestrator keeps reading - // events from Go until Go's stream ends, with `publish` no-op'ing - // each one. That's wasted LLM work but it's safe — the message - // gets persisted and the next chat reload shows it. An - // explicit Stop short-circuits this path cleanly via the - // /chat/abort handler, which DOES fire the AbortController. 
+ // Browser disconnect is NOT an abort — firing the controller + // here retroactively reclassifies in-flight successful streams + // as aborted and skips assistant persistence. Let the + // orchestrator drain naturally; publish no-ops post-disconnect. + // Explicit Stop still fires the controller via /chat/abort. publisher.markDisconnected() }, }) diff --git a/apps/sim/lib/copilot/request/otel.ts b/apps/sim/lib/copilot/request/otel.ts index ae6ef9d887f..ec11e4ce9e2 100644 --- a/apps/sim/lib/copilot/request/otel.ts +++ b/apps/sim/lib/copilot/request/otel.ts @@ -14,29 +14,18 @@ import { RequestTraceV1Outcome } from '@/lib/copilot/generated/request-trace-v1' import { CopilotBranchKind, CopilotSurface, + CopilotTransport, } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { contextFromRequestHeaders } from '@/lib/copilot/request/go/propagation' -/** - * OTel GenAI experimental semantic conventions env var. When set to a - * truthy value, each `gen_ai.*` span carries the full input and - * output conversation content as attributes. Mirrors the Go-side - * gate in `copilot/internal/providers/telemetry.go` so operators - * control both halves with one variable. - * - * Spec: https://opentelemetry.io/docs/specs/semconv/gen-ai/ - */ +// OTel GenAI content-capture env var (spec: +// https://opentelemetry.io/docs/specs/semconv/gen-ai/). Mirrored on +// the Go side so a single var controls both halves. const GENAI_CAPTURE_ENV = 'OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT' -/** - * Attribute-size cap for `gen_ai.{input,output}.messages`. Most OTLP - * backends reject attributes larger than ~64 KiB, so we truncate - * proactively to keep the rest of the span alive if a conversation - * runs long. Matches the Go-side cap to keep truncation behavior - * symmetrical between the two halves. 
- */ +// OTLP backends commonly reject attrs over 64 KiB; cap proactively. const GENAI_MESSAGE_ATTR_MAX_BYTES = 60 * 1024 function isGenAIMessageCaptureEnabled(): boolean { @@ -44,23 +33,9 @@ function isGenAIMessageCaptureEnabled(): boolean { return raw === 'true' || raw === '1' || raw === 'yes' } -/** - * Returns true if `err` is a user-initiated / upstream cancellation - * rather than a genuine failure. We check every flavor that the - * JS/Node runtime surfaces when an `AbortSignal` fires: - * - * - `DOMException` with `name === 'AbortError'` (browser + Node 18+ fetch) - * - plain `Error` with `name === 'AbortError'` (older polyfills) - * - Node's undici-shaped `code === 'ABORT_ERR'` - * - Bare `'AbortError'` strings rethrown as errors - * - * Callers use this to suppress `SpanStatusCode.ERROR` on cancel paths — - * dashboards should not light up red every time a user hits Stop. - * Matches the Go-side treatment of `context.Canceled` / - * `context.DeadlineExceeded` in `internal/core/errors.go:RecordError` - * and `internal/storage/postgres/tracing.go:dbSpan.End`. - */ -function isCancellationError(err: unknown): boolean { +// True if `err` is an AbortSignal-fired cancellation (any runtime +// flavor). Callers suppress ERROR status on cancel paths. +export function isCancellationError(err: unknown): boolean { if (err == null) return false if (typeof err === 'object') { const e = err as { name?: unknown; code?: unknown; message?: unknown } @@ -74,13 +49,8 @@ function isCancellationError(err: unknown): boolean { return false } -/** - * Apply terminal status to `span` based on whether the thrown `error` - * is a real failure or a cancellation. Always records the exception - * event for forensics; only sets `codes.ERROR` for real failures. - * Centralized so every span wrapper has identical classification. - */ -function markSpanForError(span: Span, error: unknown): void { +// Record exception + set ERROR only for real failures (cancels stay unset). 
+export function markSpanForError(span: Span, error: unknown): void { const asError = error instanceof Error ? error : new Error(String(error)) span.recordException(asError) if (!isCancellationError(error)) { @@ -91,13 +61,7 @@ function markSpanForError(span: Span, error: unknown): void { } } -/** - * Canonical OTel GenAI message shape used for both input and output - * attributes. Kept minimal — only the three part types we actually - * emit: `text`, `tool_call`, and `tool_call_response`. Adding more - * part types is cheap, but every additional shape here has to be - * mirrored in the Go serializer. - */ +// OTel GenAI message shape (kept minimal). Mirror changes on the Go side. interface GenAIAgentPart { type: 'text' | 'tool_call' | 'tool_call_response' content?: string @@ -140,12 +104,12 @@ function marshalAgentMessages(messages: GenAIAgentMessage[]): string | undefined return final.length <= GENAI_MESSAGE_ATTR_MAX_BYTES ? final : undefined } -export interface CopilotAgentInputMessages { +interface CopilotAgentInputMessages { userMessage?: string systemPrompt?: string } -export interface CopilotAgentOutputMessages { +interface CopilotAgentOutputMessages { assistantText?: string toolCalls?: Array<{ id: string @@ -196,25 +160,12 @@ function setAgentOutputMessages(span: Span, output: CopilotAgentOutputMessages): } } -/** - * Reuse the generated RequestTraceV1Outcome string values for every - * lifecycle outcome field. This keeps our OTel attributes, internal - * TraceCollector outcomes, and the trace-ingestion wire contract all - * using the same three strings ("success" | "error" | "cancelled") - * without scattering the literals through the codebase. - */ export type CopilotLifecycleOutcome = (typeof RequestTraceV1Outcome)[keyof typeof RequestTraceV1Outcome] -/** - * Resolve the tracer lazily on every call. 
With Next.js 16 + Turbopack dev - * bundling, a module-level `trace.getTracer(...)` call can be evaluated - * before the NodeSDK in `instrumentation-node.ts` installs the real - * TracerProvider. If that happens, the cached tracer is the NoOpTracer, - * which produces NoOpSpans whose `.end()` never reaches any processor — - * silently disabling all OTel on the Sim side. Calling `trace.getTracer` - * per request ensures we always pick up the currently-registered provider. - */ +// Lazy tracer — Next 16/Turbopack can evaluate modules before NodeSDK +// installs the real TracerProvider; resolving per call avoids a +// cached NoOpTracer silently disabling OTel. export function getCopilotTracer() { return trace.getTracer('sim-ai-platform', '1.0.0') } @@ -223,20 +174,8 @@ function getTracer() { return getCopilotTracer() } -/** - * Wrap an inbound Next.js route handler that Go calls into (e.g. billing - * update-cost, api-key validate) so the Sim-side work shows up as a - * child of the originating Go span in the same trace. - * - * Reads `traceparent` / `tracestate` from the request headers, installs - * that remote span as the active parent, and starts a server-kind OTel - * span around `fn`. Any `withCopilotSpan`/`withDbSpan`/etc. call below - * nests automatically via AsyncLocalStorage. - * - * If the request has no trace context (e.g. hand-rolled curl, browser - * test), this still produces a valid root span for the handler — you - * just won't see the Go-side parent. - */ +// Wrap an inbound handler that Go called into so its span parents +// under the Go-side trace (via `traceparent`). export async function withIncomingGoSpan( headers: Headers, spanName: string, @@ -264,32 +203,14 @@ export async function withIncomingGoSpan( ) } -/** - * Generic helper for wrapping a copilot-lifecycle operation in an OTel - * span. Use this for post-tool processing, session recovery, subagent - * orchestration, async-runs DB calls, etc. 
— anywhere the work is part - * of a mothership request and we want it reflected in the external OTLP - * trace. - * - * The returned span honors the currently-active OTel context, so it - * threads under `gen_ai.agent.execute` (or a `tool.execute` parent) if - * one is live. If there's no active span, it becomes a root — which is - * almost never what you want; call this from inside a mothership request - * handler, not from arbitrary background code. - */ +// Wrap a copilot-lifecycle op in an OTel span. Pass `parentContext` +// explicitly when AsyncLocalStorage-tracked context can be dropped +// across multiple awaits (otherwise the child falls back to a framework +// span that the sampler drops). export async function withCopilotSpan( spanName: string, attributes: Record | undefined, fn: (span: Span) => Promise, - /** - * Optional explicit parent context. Useful when the caller is in a - * code path where Next.js / Turbopack / multiple awaits can drop the - * AsyncLocalStorage-tracked context we installed at the top of the - * request — passing the captured root context explicitly guarantees - * the new span parents correctly instead of falling back to whatever - * framework span is currently active (which then gets dropped by our - * sampler, stranding this span in the trace). - */ parentContext?: Context ): Promise { const tracer = getTracer() @@ -311,12 +232,8 @@ export async function withCopilotSpan( return tracer.startActiveSpan(spanName, { attributes }, runBody) } -/** - * Run `fn` inside an OTel `tool.execute` span. This mirrors the internal - * TraceCollector span that already wraps Sim-side tool work, so the - * external OTLP trace reflects the actual tool execution (the Go side's - * `tool.execute` is just the async enqueue and stays ~0ms). - */ +// External OTel `tool.execute` span for Sim-side tool work (the Go +// side's `tool.execute` is just the enqueue, stays ~0ms). 
export async function withCopilotToolSpan( input: { toolName: string @@ -341,7 +258,11 @@ export async function withCopilotToolSpan( ...(typeof input.argsBytes === 'number' ? { [TraceAttr.ToolArgsBytes]: input.argsBytes } : {}), - ...(input.argsPreview ? { [TraceAttr.ToolArgsPreview]: input.argsPreview } : {}), + // argsPreview can leak pasted credentials in tool args; gate + // behind the GenAI content-capture env var. + ...(input.argsPreview && isGenAIMessageCaptureEnabled() + ? { [TraceAttr.ToolArgsPreview]: input.argsPreview } + : {}), }, }, async (span) => { @@ -376,17 +297,11 @@ function createFallbackSpanContext(): SpanContext { } } -export interface CopilotOtelScope { - /** - * Optional override for the logical request ID surfaced on - * `request.id` / `sim.request_id` span attributes. Leave unset on - * the primary chat POST path — `startCopilotOtelRoot` will derive - * it from the newly-created root span's OTel trace ID, which is the - * same 32-hex value that flows through `traceparent` and shows up - * in Grafana. Pass an explicit value only for paths that need a - * non-trace-derived identifier (e.g. headless / resume taking an - * ID from persisted state). - */ +interface CopilotOtelScope { + // Leave unset on the chat POST — startCopilotOtelRoot will derive + // from the root span's OTel trace ID (same value Grafana uses). + // Set explicitly on paths that need a non-trace-derived ID (headless, + // resume taking an ID from persisted state). requestId?: string route?: string chatId?: string @@ -395,64 +310,38 @@ export interface CopilotOtelScope { runId?: string streamId?: string transport: 'headless' | 'stream' - /** - * First ~500 chars of the user's prompt, surfaced as - * `copilot.user.message_preview` on the root span. Lets dashboards - * show a "what was this request about" column without having to - * parse the full `gen_ai.input.messages` JSON attribute (which is - * also gated on a separate env var). 
Safe even when full-content - * capture is off — a preview snippet is useful for operators - * scanning trace lists, low-risk relative to full prompts. - */ userMessagePreview?: string } -/** - * Max characters kept in `copilot.user.message_preview`. Chosen to - * fit in a dashboard table cell without truncation (most Grafana - * table cells render ~300 chars before wrapping), but long enough - * to disambiguate requests in triage. - */ +// Dashboard-column width; long enough for triage disambiguation. const USER_MESSAGE_PREVIEW_MAX_CHARS = 500 - -/** - * Build the canonical `gen_ai.agent.execute` attribute set from a scope. - * Shared between `withCopilotOtelContext` (fully-managed lifetime) and - * `startCopilotOtelRoot` (manually-managed, for handlers that need the - * span to outlive the synchronous handler body — e.g. SSE routes). - */ function buildAgentSpanAttributes( scope: CopilotOtelScope & { requestId: string } ): Record { - const preview = truncateUserMessagePreview(scope.userMessagePreview) + // Gated behind the same env var as full GenAI message capture — a + // 500-char preview is still user prompt content. + const preview = isGenAIMessageCaptureEnabled() + ? truncateUserMessagePreview(scope.userMessagePreview) + : undefined return { - 'gen_ai.agent.name': 'mothership', - 'gen_ai.agent.id': scope.transport === 'stream' ? 'mothership-stream' : 'mothership-headless', - 'gen_ai.operation.name': scope.transport === 'stream' ? 'chat' : 'invoke_agent', - // `request.id` and `sim.request_id` intentionally carry the SAME - // value. For chat POSTs (where scope.requestId is not provided - // by the caller) this is the OTel trace ID of this root span — - // meaning the value pasted from the UI's "copy request ID" - // button works directly in Grafana's trace-ID search box. - 'request.id': scope.requestId, - 'sim.request_id': scope.requestId, - 'copilot.route': scope.route ?? '', - 'copilot.transport': scope.transport, - ...(scope.chatId ? 
{ 'chat.id': scope.chatId } : {}), - ...(scope.workflowId ? { 'workflow.id': scope.workflowId } : {}), - ...(scope.executionId ? { 'workflow.execution_id': scope.executionId } : {}), - ...(scope.runId ? { 'run.id': scope.runId } : {}), - ...(scope.streamId ? { 'stream.id': scope.streamId } : {}), - ...(preview ? { 'copilot.user.message_preview': preview } : {}), + [TraceAttr.GenAiAgentName]: 'mothership', + [TraceAttr.GenAiAgentId]: + scope.transport === CopilotTransport.Stream ? 'mothership-stream' : 'mothership-headless', + [TraceAttr.GenAiOperationName]: + scope.transport === CopilotTransport.Stream ? 'chat' : 'invoke_agent', + [TraceAttr.RequestId]: scope.requestId, + [TraceAttr.SimRequestId]: scope.requestId, + [TraceAttr.CopilotRoute]: scope.route ?? '', + [TraceAttr.CopilotTransport]: scope.transport, + ...(scope.chatId ? { [TraceAttr.ChatId]: scope.chatId } : {}), + ...(scope.workflowId ? { [TraceAttr.WorkflowId]: scope.workflowId } : {}), + ...(scope.executionId ? { [TraceAttr.CopilotExecutionId]: scope.executionId } : {}), + ...(scope.runId ? { [TraceAttr.RunId]: scope.runId } : {}), + ...(scope.streamId ? { [TraceAttr.StreamId]: scope.streamId } : {}), + ...(preview ? { [TraceAttr.CopilotUserMessagePreview]: preview } : {}), } } -/** - * Collapse newlines and trim the user's prompt to a fixed length so - * it fits cleanly in a single dashboard table cell. Non-strings are - * ignored (the chat schema enforces string, but this is defensive - * against upstream shape changes). - */ function truncateUserMessagePreview(raw: unknown): string | undefined { if (typeof raw !== 'string') return undefined const collapsed = raw.replace(/\s+/g, ' ').trim() @@ -461,114 +350,41 @@ function truncateUserMessagePreview(raw: unknown): string | undefined { return `${collapsed.slice(0, USER_MESSAGE_PREVIEW_MAX_CHARS - 1)}…` } -/** - * Start a `gen_ai.agent.execute` root span with manually-managed - * lifetime. 
Returns the span, its context, and a `finish` callback the - * caller MUST invoke when the whole request lifecycle is over (including - * any SSE streaming that outlives the Next.js handler return). - * - * Use this for the chat POST handler path: - * 1. Start the root at the top so `persistUserMessage` and every other - * setup span is a child instead of orphaning into a new trace. - * 2. Pass the context into `createSSEStream` so the stream callback - * re-enters it (AsyncLocalStorage does not survive the Next.js - * handler return into the ReadableStream runtime). - * 3. Call `finish()` from the stream's terminal code path. - * - * Prefer `withCopilotOtelContext` when the work is fully inside one - * async function (e.g. headless invoke) — it handles the lifecycle for - * you. - */ -/** - * Request-shape metadata that's only known AFTER the branch resolves - * (can't be set at startCopilotOtelRoot time). Stamped on the root - * `gen_ai.agent.execute` span so dashboards can slice requests by how - * they were sent: which product surface, which mode, which model, with - * attachments or not, and whether the request arrived while a prior - * stream was still alive (i.e. user hit send-to-interrupt). - */ -export interface CopilotOtelRequestShape { - /** - * Product surface. Derived from `branch.kind` — "workflow" means the - * copilot sidebar (attached to a specific workflow), "workspace" - * means the mothership workspace-level chat. Also stamped as a - * human-friendly `copilot.surface` (`copilot` | `mothership`). - */ +// Request-shape metadata known only after branch resolution. Stamped +// on the root span for dashboard filtering. +interface CopilotOtelRequestShape { branchKind?: 'workflow' | 'workspace' - /** Mothership request mode — `agent`, `ask`, `build`, etc. */ mode?: string - /** LLM model identifier the caller selected. */ model?: string - /** LLM provider the caller selected (`anthropic`, `openai`, …). 
*/ provider?: string - /** Whether this POST created a brand-new chat. */ createNewChat?: boolean - /** `true` when the caller sent `prefetch: true` (UI speculative send). */ prefetch?: boolean - /** How many file attachments were present. */ fileAttachmentsCount?: number - /** How many resource attachments (workspace files, knowledge, …). */ resourceAttachmentsCount?: number - /** Free-form context blocks the caller attached. */ contextsCount?: number - /** Explicit commands (e.g. slash commands) present in the request. */ commandsCount?: number - /** - * Time spent waiting for the per-chat stream lock, in ms. Values - * above ~50ms strongly imply this request arrived while a prior - * stream for the same chat was still in flight (i.e. user pressed - * send-to-interrupt, or a tab refresh overlapped with an active - * request). - */ pendingStreamWaitMs?: number - /** True if `pendingStreamWaitMs` was non-trivially long. */ interruptedPriorStream?: boolean } -export interface CopilotOtelRoot { +interface CopilotOtelRoot { span: Span context: Context finish: (outcome?: CopilotLifecycleOutcome, error?: unknown) => void - /** - * Record `gen_ai.input.messages` on the root agent span. Gated on - * `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` — no-op when - * capture is disabled. Safe to call multiple times; the latest - * call wins. - */ setInputMessages: (input: CopilotAgentInputMessages) => void - /** - * Record `gen_ai.output.messages` on the root agent span. Gated on - * the same env var as `setInputMessages`. Typically called from the - * stream finalize callback once the assistant's final content and - * invoked tool calls are known. - */ setOutputMessages: (output: CopilotAgentOutputMessages) => void - /** - * Stamp request-shape attributes that are only known after the - * branch resolves (mode, provider, model, surface, attachment - * counts, interrupt signal). Safe to call multiple times — later - * calls override earlier ones for the same key. 
- */ setRequestShape: (shape: CopilotOtelRequestShape) => void } export function startCopilotOtelRoot( scope: CopilotOtelScope ): CopilotOtelRoot & { requestId: string } { - // Create gen_ai.agent.execute as a TRUE root span — do not inherit - // from Next.js's HTTP handler span. The framework span is dropped by - // our sampler (it has `next.span_type`), so if we parented under it, - // this span would appear orphaned in Jaeger ("span has missing parent" - // warning) and any descendant whose AsyncLocalStorage propagation was - // disrupted would inherit the same dropped parent. Starting from - // ROOT_CONTEXT gives the mothership lifecycle its own clean trace tree. + // TRUE root — don't inherit from Next's HTTP handler span (the + // sampler drops those; we'd orphan the whole mothership tree). const parentContext = ROOT_CONTEXT - // Start the span FIRST with a placeholder requestId, so we can read - // its actual trace ID and stamp it as the canonical `request.id`. - // This makes the ID the UI exposes (via `msg.requestId`) identical - // to the trace ID Grafana uses — one ID, pasteable anywhere. When - // the caller provided an explicit override (resume / headless / - // tests) we keep that instead. + // Start with a placeholder `requestId`, then overwrite using the + // span's actual trace ID so the UI copy-button value pastes + // directly into Grafana. const span = getTracer().startSpan( TraceSpan.GenAiAgentExecute, { attributes: buildAgentSpanAttributes({ ...scope, requestId: '' }) }, @@ -578,18 +394,11 @@ export function startCopilotOtelRoot( ? span : trace.wrapSpanContext(createFallbackSpanContext()) const spanContext = carrierSpan.spanContext() - // Derived ID: use the caller's override when given, otherwise the - // real OTel trace ID. Fall back to an empty string only when OTel - // itself failed to produce a valid span (shouldn't happen in prod - // but the carrier branch above already handles that defensively). const requestId = scope.requestId ?? 
(spanContext.traceId && spanContext.traceId.length === 32 ? spanContext.traceId : '') - // Re-stamp with the resolved ID (overwriting the placeholder empties - // set above). Cheap — both `request.id` and `sim.request_id` get the - // same value. - span.setAttribute('request.id', requestId) - span.setAttribute('sim.request_id', requestId) + span.setAttribute(TraceAttr.RequestId, requestId) + span.setAttribute(TraceAttr.SimRequestId, requestId) const rootContext = trace.setSpan(parentContext, carrierSpan) let finished = false @@ -599,10 +408,6 @@ export function startCopilotOtelRoot( const resolvedOutcome = outcome ?? RequestTraceV1Outcome.success span.setAttribute(TraceAttr.CopilotRequestOutcome, resolvedOutcome) if (error) { - // `markSpanForError` records the exception event but only sets - // `codes.ERROR` for real failures — a cancellation-shaped error - // here stays `unset` (or `OK` if we resolve it below) so the - // trace doesn't look red when the user intentionally stopped. markSpanForError(span, error) if (isCancellationError(error)) { span.setStatus({ code: SpanStatusCode.OK }) @@ -611,10 +416,8 @@ export function startCopilotOtelRoot( resolvedOutcome === RequestTraceV1Outcome.success || resolvedOutcome === RequestTraceV1Outcome.cancelled ) { - // Explicitly mark cancelled outcomes as OK so dashboards keying - // off span status don't treat "user hit Stop" as a failure — the - // rich detail lives on `copilot.request.cancel_reason` and the - // `request.cancelled` event. + // Cancelled = OK so dashboards keying off span status don't + // treat Stop as a failure. Detail lives on cancel_reason. span.setStatus({ code: SpanStatusCode.OK }) } span.end() @@ -623,9 +426,6 @@ export function startCopilotOtelRoot( return { span, context: rootContext, - // Surface the resolved requestId so callers can thread it through - // trackers, log prefixes, and persisted `msg.requestId` without - // having to dig it back out of span attributes. 
requestId, finish, setInputMessages: (input) => setAgentInputMessages(span, input), @@ -634,13 +434,7 @@ export function startCopilotOtelRoot( } } -/** - * Threshold (ms) above which we consider a pending-stream-lock wait - * to indicate this request interrupted a prior in-flight stream. Well - * above the typical uncontested acquire (<10ms) but below any normal - * human-caused delay. Tuned to flag overlap cases — not perfect, but - * useful for filtering dashboards. - */ +// Pending-stream-lock wait above this = inferred send-to-interrupt. const INTERRUPT_WAIT_MS_THRESHOLD = 50 function applyRequestShape(span: Span, shape: CopilotOtelRequestShape): void { @@ -707,8 +501,8 @@ export async function withCopilotOtelContext( scope.requestId ?? (spanContext.traceId && spanContext.traceId.length === 32 ? spanContext.traceId : '') if (resolvedRequestId) { - span.setAttribute('request.id', resolvedRequestId) - span.setAttribute('sim.request_id', resolvedRequestId) + span.setAttribute(TraceAttr.RequestId, resolvedRequestId) + span.setAttribute(TraceAttr.SimRequestId, resolvedRequestId) } const otelContext = trace.setSpan(parentContext, carrierSpan) let terminalStatusSet = false diff --git a/apps/sim/lib/copilot/request/session/abort.ts b/apps/sim/lib/copilot/request/session/abort.ts index ea0dcdb913d..d55640b9dcd 100644 --- a/apps/sim/lib/copilot/request/session/abort.ts +++ b/apps/sim/lib/copilot/request/session/abort.ts @@ -131,9 +131,9 @@ export async function acquirePendingChatStream( return withCopilotSpan( TraceSpan.CopilotChatAcquirePendingStreamLock, { - 'chat.id': chatId, - 'stream.id': streamId, - 'lock.timeout_ms': timeoutMs, + [TraceAttr.ChatId]: chatId, + [TraceAttr.StreamId]: streamId, + [TraceAttr.LockTimeoutMs]: timeoutMs, }, async (span) => { const redis = getRedisClient() diff --git a/apps/sim/lib/copilot/request/session/recovery.ts b/apps/sim/lib/copilot/request/session/recovery.ts index c11ba83ea76..8bad72847a0 100644 --- 
a/apps/sim/lib/copilot/request/session/recovery.ts +++ b/apps/sim/lib/copilot/request/session/recovery.ts @@ -32,9 +32,9 @@ export async function checkForReplayGap( return withCopilotSpan( TraceSpan.CopilotRecoveryCheckReplayGap, { - 'stream.id': streamId, - 'copilot.recovery.requested_after_seq': requestedAfterSeq, - ...(requestId ? { 'request.id': requestId } : {}), + [TraceAttr.StreamId]: streamId, + [TraceAttr.CopilotRecoveryRequestedAfterSeq]: requestedAfterSeq, + ...(requestId ? { [TraceAttr.RequestId]: requestId } : {}), }, async (span) => { const oldestSeq = await getOldestSeq(streamId) diff --git a/apps/sim/lib/copilot/request/subagent.ts b/apps/sim/lib/copilot/request/subagent.ts index 1672bf6570c..ff1440a3ee2 100644 --- a/apps/sim/lib/copilot/request/subagent.ts +++ b/apps/sim/lib/copilot/request/subagent.ts @@ -58,11 +58,11 @@ export async function orchestrateSubagentStream( return withCopilotSpan( TraceSpan.CopilotSubagentExecute, { - 'subagent.id': agentId, - 'user.id': options.userId, - ...(options.simRequestId ? { 'sim.request_id': options.simRequestId } : {}), - ...(options.workflowId ? { 'workflow.id': options.workflowId } : {}), - ...(options.workspaceId ? { 'workspace.id': options.workspaceId } : {}), + [TraceAttr.SubagentId]: agentId, + [TraceAttr.UserId]: options.userId, + ...(options.simRequestId ? { [TraceAttr.SimRequestId]: options.simRequestId } : {}), + ...(options.workflowId ? { [TraceAttr.WorkflowId]: options.workflowId } : {}), + ...(options.workspaceId ? { [TraceAttr.WorkspaceId]: options.workspaceId } : {}), }, async (otelSpan) => { const result = await orchestrateSubagentStreamInner(agentId, requestPayload, options) @@ -137,9 +137,6 @@ async function orchestrateSubagentStreamInner( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), 'X-Client-Version': SIM_AGENT_VERSION, - // X-Sim-Request-ID removed — Go derives the logical request - // ID from the propagated W3C `traceparent` now. See - // lifecycle/run.ts for the full rationale. }, body: JSON.stringify({ ...requestPayload, diff --git a/apps/sim/lib/copilot/request/tools/files.ts b/apps/sim/lib/copilot/request/tools/files.ts index 6b4958e9151..80c4371cd5f 100644 --- a/apps/sim/lib/copilot/request/tools/files.ts +++ b/apps/sim/lib/copilot/request/tools/files.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { FunctionExecute, UserTable } from '@/lib/copilot/generated/tool-catalog-v1' import { CopilotOutputFileOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceEvent } from '@/lib/copilot/generated/trace-events-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' @@ -156,8 +157,8 @@ export async function maybeWriteOutputToFile( return withCopilotSpan( TraceSpan.CopilotToolsWriteOutputFile, { - 'tool.name': toolName, - 'workspace.id': context.workspaceId, + [TraceAttr.ToolName]: toolName, + [TraceAttr.WorkspaceId]: context.workspaceId, }, async (span) => { try { @@ -216,8 +217,8 @@ export async function maybeWriteOutputToFile( error: message, }) span.setAttribute(TraceAttr.CopilotOutputFileOutcome, CopilotOutputFileOutcome.Failed) - span.addEvent('copilot.output_file.error', { - 'error.message': message.slice(0, 500), + span.addEvent(TraceEvent.CopilotOutputFileError, { + [TraceAttr.ErrorMessage]: message.slice(0, 500), }) return { success: false, diff --git a/apps/sim/lib/copilot/request/tools/resources.ts b/apps/sim/lib/copilot/request/tools/resources.ts index bb757350769..15fd19e6552 100644 --- 
a/apps/sim/lib/copilot/request/tools/resources.ts +++ b/apps/sim/lib/copilot/request/tools/resources.ts @@ -46,8 +46,8 @@ export async function handleResourceSideEffects( return withCopilotSpan( TraceSpan.CopilotToolsHandleResourceSideEffects, { - 'tool.name': toolName, - 'chat.id': chatId, + [TraceAttr.ToolName]: toolName, + [TraceAttr.ChatId]: chatId, }, async (span) => { let isDeleteOp = false diff --git a/apps/sim/lib/copilot/request/tools/tables.ts b/apps/sim/lib/copilot/request/tools/tables.ts index 1fed85102af..2949a66a9eb 100644 --- a/apps/sim/lib/copilot/request/tools/tables.ts +++ b/apps/sim/lib/copilot/request/tools/tables.ts @@ -6,6 +6,7 @@ import { eq } from 'drizzle-orm' import { FunctionExecute, Read as ReadTool } from '@/lib/copilot/generated/tool-catalog-v1' import { CopilotTableOutcome } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceEvent } from '@/lib/copilot/generated/trace-events-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' import { withCopilotSpan } from '@/lib/copilot/request/otel' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/request/types' @@ -32,9 +33,9 @@ export async function maybeWriteOutputToTable( return withCopilotSpan( TraceSpan.CopilotToolsWriteOutputTable, { - 'tool.name': toolName, - 'copilot.table.id': outputTable, - 'workspace.id': context.workspaceId, + [TraceAttr.ToolName]: toolName, + [TraceAttr.CopilotTableId]: outputTable, + [TraceAttr.WorkspaceId]: context.workspaceId, }, async (span) => { try { @@ -139,8 +140,8 @@ export async function maybeWriteOutputToTable( error: err instanceof Error ? err.message : String(err), }) span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Failed) - span.addEvent('copilot.table.error', { - 'error.message': (err instanceof Error ? 
err.message : String(err)).slice(0, 500), + span.addEvent(TraceEvent.CopilotTableError, { + [TraceAttr.ErrorMessage]: (err instanceof Error ? err.message : String(err)).slice(0, 500), }) return { success: false, @@ -167,9 +168,9 @@ export async function maybeWriteReadCsvToTable( return withCopilotSpan( TraceSpan.CopilotToolsWriteCsvToTable, { - 'tool.name': toolName, - 'copilot.table.id': outputTable, - 'workspace.id': context.workspaceId, + [TraceAttr.ToolName]: toolName, + [TraceAttr.CopilotTableId]: outputTable, + [TraceAttr.WorkspaceId]: context.workspaceId, }, async (span) => { try { @@ -286,8 +287,8 @@ export async function maybeWriteReadCsvToTable( error: err instanceof Error ? err.message : String(err), }) span.setAttribute(TraceAttr.CopilotTableOutcome, CopilotTableOutcome.Failed) - span.addEvent('copilot.table.error', { - 'error.message': (err instanceof Error ? err.message : String(err)).slice(0, 500), + span.addEvent(TraceEvent.CopilotTableError, { + [TraceAttr.ErrorMessage]: (err instanceof Error ? err.message : String(err)).slice(0, 500), }) return { success: false, diff --git a/apps/sim/lib/copilot/request/trace.ts b/apps/sim/lib/copilot/request/trace.ts index 05d250c77de..cb399959d7d 100644 --- a/apps/sim/lib/copilot/request/trace.ts +++ b/apps/sim/lib/copilot/request/trace.ts @@ -134,7 +134,7 @@ export async function reportTrace( spanName: 'sim → go /api/traces', operation: 'report_trace', attributes: { - 'copilot.request.id': trace.simRequestId ?? '', + [TraceAttr.RequestId]: trace.simRequestId ?? '', [TraceAttr.HttpRequestContentLength]: body.length, [TraceAttr.CopilotTraceSpanCount]: trace.spans?.length ?? 
0, }, diff --git a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts index a7c2e57bdf2..ae1a36b5f1e 100644 --- a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts +++ b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts @@ -466,9 +466,6 @@ async function reportCompletion( }) const res = await fetch(COPILOT_CONFIRM_API_PATH, { method: 'POST', - // Propagate the chat's root traceparent so the confirm handler - // becomes a child span of the original request's trace. See - // `trace-context.ts` for why this lives in a module singleton. headers: { 'Content-Type': 'application/json', ...traceparentHeader() }, body, }) diff --git a/apps/sim/lib/copilot/tools/client/trace-context.ts b/apps/sim/lib/copilot/tools/client/trace-context.ts index c87c6ebff8d..30c6eecdba6 100644 --- a/apps/sim/lib/copilot/tools/client/trace-context.ts +++ b/apps/sim/lib/copilot/tools/client/trace-context.ts @@ -1,57 +1,16 @@ -/** - * Browser-side mutable holder for the W3C `traceparent` of the - * current copilot chat stream. - * - * Why this exists as a module-level singleton rather than React - * state / ref: the client-tool-execution code path fires off HTTP - * callbacks (`/api/copilot/confirm`) from arbitrary depth inside - * tool runners that aren't children of any React component tree — - * some are triggered from workflow-runtime callbacks, iframed - * editors, or generic promise chains. Threading a trace id through - * those layers would require changing a dozen function signatures - * across packages we don't control. - * - * A module-level holder works because the browser only ever has one - * active copilot chat at a time (the UI gates sending a new one on - * the stop-barrier). The chat-session hook writes this on the first - * chat POST response and nulls it out when the stream terminates, - * so client tool callbacks emitted during that window can read the - * right value without plumbing. 
- * - * Not an `export const obj`; using getters/setters so callers can't - * accidentally mutate the backing field (e.g. a stale ref held from - * before a new chat started). Keep this module tiny — it has one - * job. - */ +// Browser-side W3C traceparent holder for the active copilot chat. +// Module-level singleton because client tool callbacks fire from deep +// inside runtime code that can't thread a React ref. The browser only +// has one active chat at a time (gated by the stop-barrier), so a +// singleton is safe. let currentTraceparent: string | undefined -/** - * Set the traceparent for the current chat stream. Called by the - * chat-session hook after receiving the `traceparent` response - * header from the initial chat POST. Pass `undefined` to clear it - * when the stream terminates or a new chat begins. - */ export function setCurrentChatTraceparent(value: string | undefined): void { currentTraceparent = value } -/** - * Read the traceparent for the currently-active chat. Returns - * `undefined` if no chat is in-flight — callers should fall through - * without a traceparent header in that case, NOT block or throw. - */ -export function getCurrentChatTraceparent(): string | undefined { - return currentTraceparent -} - -/** - * Convenience: header spread suitable for inclusion in `fetch` init - * objects. Returns `{}` when no traceparent is set so the spread is - * safe to use unconditionally: - * - * await fetch(url, { headers: { ...tracepa rentHeader(), ... } }) - */ +// `fetch` header spread: `headers: { ...traceparentHeader(), ... }`. export function traceparentHeader(): Record { const tp = currentTraceparent return tp ? 
{ traceparent: tp } : {} diff --git a/apps/sim/lib/copilot/tools/handlers/vfs.ts b/apps/sim/lib/copilot/tools/handlers/vfs.ts index 35ab7b9c23b..a3a1245edba 100644 --- a/apps/sim/lib/copilot/tools/handlers/vfs.ts +++ b/apps/sim/lib/copilot/tools/handlers/vfs.ts @@ -169,7 +169,7 @@ export async function executeVfsRead( logger.warn('Upload read result too large', { path, hasAttachment: isImage, - contentPreview: uploadResult.content.slice(0, 120), + contentLength: uploadResult.content.length, serializedSize: serializedResultSize(uploadResult), }) return { @@ -211,7 +211,7 @@ export async function executeVfsRead( logger.warn('File read result too large', { path, hasAttachment: isImage, - contentPreview: fileContent.content.slice(0, 120), + contentLength: fileContent.content.length, serializedSize: serializedResultSize(fileContent), }) return { diff --git a/apps/sim/lib/copilot/vfs/file-reader.ts b/apps/sim/lib/copilot/vfs/file-reader.ts index e8a16b01db7..2e6bc59d459 100644 --- a/apps/sim/lib/copilot/vfs/file-reader.ts +++ b/apps/sim/lib/copilot/vfs/file-reader.ts @@ -1,4 +1,4 @@ -import { type Span, SpanStatusCode, trace } from '@opentelemetry/api' +import { type Span, trace } from '@opentelemetry/api' import { createLogger } from '@sim/logger' import { CopilotVfsOutcome, @@ -6,31 +6,30 @@ import { CopilotVfsReadPath, } from '@/lib/copilot/generated/trace-attribute-values-v1' import { TraceAttr } from '@/lib/copilot/generated/trace-attributes-v1' +import { TraceEvent } from '@/lib/copilot/generated/trace-events-v1' import { TraceSpan } from '@/lib/copilot/generated/trace-spans-v1' +import { markSpanForError } from '@/lib/copilot/request/otel' import type { WorkspaceFileRecord } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { downloadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager' import { isImageFileType } from '@/lib/uploads/utils/file-utils' -/** - * Lazy tracer (see lib/copilot/request/otel.ts for the same 
pattern and - * why we resolve on every call). - */ +// Lazy tracer (same pattern as lib/copilot/request/otel.ts). function getVfsTracer() { return trace.getTracer('sim-copilot-vfs', '1.0.0') } function recordSpanError(span: Span, err: unknown) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: err instanceof Error ? err.message : String(err), - }) - span.recordException(err instanceof Error ? err : new Error(String(err))) + markSpanForError(span, err) } const logger = createLogger('FileReader') const MAX_TEXT_READ_BYTES = 5 * 1024 * 1024 // 5 MB const MAX_IMAGE_READ_BYTES = 5 * 1024 * 1024 // 5 MB +// Parseable-document byte cap. Large office/PDF files can still +// produce huge extracted text; reject up front to avoid wasting a +// download + parse only to blow past the tool-result budget. +const MAX_PARSEABLE_READ_BYTES = 5 * 1024 * 1024 // 5 MB const MAX_IMAGE_DIMENSION = 1568 const IMAGE_RESIZE_DIMENSIONS = [1568, 1280, 1024, 768] const IMAGE_QUALITY_STEPS = [85, 70, 55, 40] @@ -190,11 +189,11 @@ async function prepareImageForVision( mediaType: 'image/jpeg', } - span.addEvent('copilot.vfs.resize_attempt', { - 'copilot.vfs.resize.dimension': dimension, - 'copilot.vfs.resize.quality': quality, - 'copilot.vfs.resize.output_bytes': transformed.buffer.length, - 'copilot.vfs.resize.fits_budget': transformed.buffer.length <= MAX_IMAGE_READ_BYTES, + span.addEvent(TraceEvent.CopilotVfsResizeAttempt, { + [TraceAttr.CopilotVfsResizeDimension]: dimension, + [TraceAttr.CopilotVfsResizeQuality]: quality, + [TraceAttr.CopilotVfsResizeOutputBytes]: transformed.buffer.length, + [TraceAttr.CopilotVfsResizeFitsBudget]: transformed.buffer.length <= MAX_IMAGE_READ_BYTES, }) if (transformed.buffer.length <= MAX_IMAGE_READ_BYTES) { @@ -230,10 +229,11 @@ async function prepareImageForVision( quality, error: err instanceof Error ? 
err.message : String(err), }) - span.addEvent('copilot.vfs.resize_attempt_failed', { - 'copilot.vfs.resize.dimension': dimension, - 'copilot.vfs.resize.quality': quality, - 'error.message': err instanceof Error ? err.message : String(err).slice(0, 500), + span.addEvent(TraceEvent.CopilotVfsResizeAttemptFailed, { + [TraceAttr.CopilotVfsResizeDimension]: dimension, + [TraceAttr.CopilotVfsResizeQuality]: quality, + [TraceAttr.ErrorMessage]: + err instanceof Error ? err.message : String(err).slice(0, 500), }) } } @@ -348,6 +348,16 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise MAX_PARSEABLE_READ_BYTES) { + span.setAttribute( + TraceAttr.CopilotVfsReadOutcome, + CopilotVfsReadOutcome.DocumentTooLarge + ) + return { + content: `[Document too large to parse inline: ${record.name} (${record.size} bytes, limit ${MAX_PARSEABLE_READ_BYTES})]`, + totalLines: 1, + } + } const buffer = await downloadWorkspaceFile(record) try { const { parseBuffer } = await import('@/lib/file-parsers') @@ -366,8 +376,8 @@ export async function readFileRecord(record: WorkspaceFileRecord): Promise