diff --git a/src/browser/components/AIView.tsx b/src/browser/components/AIView.tsx index 53b0df58d4..131d031311 100644 --- a/src/browser/components/AIView.tsx +++ b/src/browser/components/AIView.tsx @@ -133,7 +133,7 @@ const AIViewInner: React.FC = ({ } for (const message of workspaceState.messages) { - if (message.type !== "stream-error") { + if (message.type !== "stream-error" && message.type !== "chat-error") { continue; } if (message.errorType !== "model_not_found") { @@ -143,7 +143,7 @@ const AIViewInner: React.FC = ({ continue; } handledModelErrorsRef.current.add(message.id); - if (message.model) { + if (message.type === "stream-error" && message.model) { evictModelFromLRU(message.model); } } diff --git a/src/browser/components/Messages/MessageRenderer.tsx b/src/browser/components/Messages/MessageRenderer.tsx index fca1ad1726..0b3bbd5d99 100644 --- a/src/browser/components/Messages/MessageRenderer.tsx +++ b/src/browser/components/Messages/MessageRenderer.tsx @@ -5,7 +5,7 @@ import { UserMessage } from "./UserMessage"; import { AssistantMessage } from "./AssistantMessage"; import { ToolMessage } from "./ToolMessage"; import { ReasoningMessage } from "./ReasoningMessage"; -import { StreamErrorMessage } from "./StreamErrorMessage"; +import { StreamErrorMessage, ChatErrorMessage } from "./StreamErrorMessage"; import { HistoryHiddenMessage } from "./HistoryHiddenMessage"; import { InitMessage } from "./InitMessage"; @@ -56,6 +56,8 @@ export const MessageRenderer = React.memo( return ; case "stream-error": return ; + case "chat-error": + return ; case "history-hidden": return ; case "workspace-init": diff --git a/src/browser/components/Messages/StreamErrorMessage.tsx b/src/browser/components/Messages/StreamErrorMessage.tsx index 050dcb7c41..8e9b0e93a9 100644 --- a/src/browser/components/Messages/StreamErrorMessage.tsx +++ b/src/browser/components/Messages/StreamErrorMessage.tsx @@ -31,3 +31,29 @@ export const StreamErrorMessage: React.FC = ({ message, ); }; + +/** 
+ * ChatErrorMessage - displays pre-stream errors (before AI SDK streaming starts). + * These are errors like invalid model, missing API key, unsupported provider, etc. + */ +interface ChatErrorMessageProps { + message: DisplayedMessage & { type: "chat-error" }; + className?: string; +} + +export const ChatErrorMessage: React.FC = ({ message, className }) => { + return ( +
+
+ + Error + + {message.errorType} + +
+
+ {message.error} +
+
+ ); +}; diff --git a/src/browser/stores/WorkspaceStore.ts b/src/browser/stores/WorkspaceStore.ts index 18ba5c1dd5..8046fb26bb 100644 --- a/src/browser/stores/WorkspaceStore.ts +++ b/src/browser/stores/WorkspaceStore.ts @@ -13,6 +13,7 @@ import { useSyncExternalStore } from "react"; import { isCaughtUpMessage, isStreamError, + isChatError, isDeleteMessage, isMuxMessage, isQueuedMessageChanged, @@ -998,6 +999,16 @@ export class WorkspaceStore { return; } + // chat-error: pre-stream failures (invalid model, missing API key, etc.) + // These are NOT auto-retryable - they require user action + // Don't increment retry counter, but still show in UI and allow manual retry + if (isChatError(data)) { + aggregator.handleChatError(data); + this.states.bump(workspaceId); + this.dispatchResumeCheck(workspaceId); + return; + } + if (isDeleteMessage(data)) { aggregator.handleDeleteMessage(data); this.states.bump(workspaceId); diff --git a/src/browser/utils/messages/StreamingMessageAggregator.ts b/src/browser/utils/messages/StreamingMessageAggregator.ts index 4e0c8f9ec1..fe6a819e4b 100644 --- a/src/browser/utils/messages/StreamingMessageAggregator.ts +++ b/src/browser/utils/messages/StreamingMessageAggregator.ts @@ -21,7 +21,12 @@ import type { import type { LanguageModelV2Usage } from "@ai-sdk/provider"; import type { TodoItem, StatusSetToolResult } from "@/common/types/tools"; -import type { WorkspaceChatMessage, StreamErrorMessage, DeleteMessage } from "@/common/orpc/types"; +import type { + WorkspaceChatMessage, + StreamErrorMessage, + ChatErrorMessage, + DeleteMessage, +} from "@/common/orpc/types"; import { isInitStart, isInitOutput, isInitEnd, isMuxMessage } from "@/common/orpc/types"; import type { DynamicToolPart, @@ -589,6 +594,40 @@ export class StreamingMessageAggregator { } } + /** + * Handle pre-stream chat errors (before stream-start). + * These are distinct from stream-error (AI SDK errors during streaming). 
+ * + * chat-error occurs when: + * - Model validation fails (invalid format, non-existent model) + * - API key is missing + * - Provider is not supported + * - etc. + * + * Creates a synthetic error message since there's no active stream. + */ + handleChatError(data: ChatErrorMessage): void { + // Get the highest historySequence from existing messages so this appears at the end + const maxSequence = Math.max( + 0, + ...Array.from(this.messages.values()).map((m) => m.metadata?.historySequence ?? 0) + ); + const errorMessage: MuxMessage = { + id: data.messageId, + role: "assistant", + parts: [], + metadata: { + partial: true, + error: data.error, + errorType: data.errorType, + timestamp: Date.now(), + historySequence: maxSequence + 1, + }, + }; + this.messages.set(data.messageId, errorMessage); + this.invalidateCache(); + } + handleToolCallStart(data: ToolCallStartEvent): void { const message = this.messages.get(data.messageId); if (!message) return; diff --git a/src/browser/utils/messages/messageUtils.ts b/src/browser/utils/messages/messageUtils.ts index 32d40640fe..e4141de675 100644 --- a/src/browser/utils/messages/messageUtils.ts +++ b/src/browser/utils/messages/messageUtils.ts @@ -11,6 +11,7 @@ export function shouldShowInterruptedBarrier(msg: DisplayedMessage): boolean { if ( msg.type === "user" || msg.type === "stream-error" || + msg.type === "chat-error" || msg.type === "history-hidden" || msg.type === "workspace-init" ) diff --git a/src/browser/utils/messages/retryEligibility.ts b/src/browser/utils/messages/retryEligibility.ts index 565c6f9236..ec4a0fb255 100644 --- a/src/browser/utils/messages/retryEligibility.ts +++ b/src/browser/utils/messages/retryEligibility.ts @@ -92,6 +92,7 @@ export function hasInterruptedStream( return ( lastMessage.type === "stream-error" || // Stream errored out (show UI for ALL error types) + lastMessage.type === "chat-error" || // Pre-stream error (show UI so user can retry after fixing) lastMessage.type === "user" || // No 
response received yet (app restart during slow model) (lastMessage.type === "assistant" && lastMessage.isPartial === true) || (lastMessage.type === "tool" && lastMessage.isPartial === true) || @@ -122,6 +123,13 @@ export function isEligibleForAutoRetry( // If the last message is a non-retryable error, don't auto-retry // (but manual retry is still available via hasInterruptedStream) const lastMessage = messages[messages.length - 1]; + + // chat-error is NEVER auto-retryable - always requires user action + // (fixing model selection, adding API key, etc.) + if (lastMessage.type === "chat-error") { + return false; + } + if (lastMessage.type === "stream-error") { // Debug flag: force all errors to be retryable if (isForceAllRetryableEnabled()) { diff --git a/src/common/orpc/schemas.ts b/src/common/orpc/schemas.ts index 849d2a1e91..e5d6a65b09 100644 --- a/src/common/orpc/schemas.ts +++ b/src/common/orpc/schemas.ts @@ -82,6 +82,7 @@ export { ReasoningEndEventSchema, RestoreToInputEventSchema, SendMessageOptionsSchema, + ChatErrorMessageSchema, StreamAbortEventSchema, StreamDeltaEventSchema, StreamEndEventSchema, diff --git a/src/common/orpc/schemas/stream.ts b/src/common/orpc/schemas/stream.ts index 9a2d6b092c..eeeee62c11 100644 --- a/src/common/orpc/schemas/stream.ts +++ b/src/common/orpc/schemas/stream.ts @@ -22,6 +22,23 @@ export const StreamErrorMessageSchema = z.object({ errorType: StreamErrorTypeSchema, }); +/** + * Chat error message - for errors that occur BEFORE streaming starts. + * Distinct from StreamErrorMessage (AI SDK stream errors that happen during streaming). + * + * These errors are NOT auto-retryable - they require user action: + * - Invalid model format + * - Missing API key + * - Unsupported provider + * - etc. 
+ */ +export const ChatErrorMessageSchema = z.object({ + type: z.literal("chat-error"), + messageId: z.string(), + error: z.string(), + errorType: StreamErrorTypeSchema, +}); + export const DeleteMessageSchema = z.object({ type: z.literal("delete"), historySequences: z.array(z.number()), @@ -260,6 +277,7 @@ export const WorkspaceChatMessageSchema = z.discriminatedUnion("type", [ // Stream lifecycle events CaughtUpMessageSchema, StreamErrorMessageSchema, + ChatErrorMessageSchema, DeleteMessageSchema, StreamStartEventSchema, StreamDeltaEventSchema, diff --git a/src/common/orpc/types.ts b/src/common/orpc/types.ts index 51a7759141..b97969303a 100644 --- a/src/common/orpc/types.ts +++ b/src/common/orpc/types.ts @@ -25,6 +25,7 @@ export type ImagePart = z.infer; export type WorkspaceChatMessage = z.infer; export type CaughtUpMessage = z.infer; export type StreamErrorMessage = z.infer; +export type ChatErrorMessage = z.infer; export type DeleteMessage = z.infer; export type WorkspaceInitEvent = z.infer; export type UpdateStatus = z.infer; @@ -43,6 +44,10 @@ export function isStreamError(msg: WorkspaceChatMessage): msg is StreamErrorMess return (msg as { type?: string }).type === "stream-error"; } +export function isChatError(msg: WorkspaceChatMessage): msg is ChatErrorMessage { + return (msg as { type?: string }).type === "chat-error"; +} + export function isDeleteMessage(msg: WorkspaceChatMessage): msg is DeleteMessage { return (msg as { type?: string }).type === "delete"; } diff --git a/src/common/types/message.ts b/src/common/types/message.ts index fc402fc5dd..b8647df989 100644 --- a/src/common/types/message.ts +++ b/src/common/types/message.ts @@ -222,6 +222,17 @@ export type DisplayedMessage = model?: string; errorCount?: number; // Number of consecutive identical errors merged into this message } + | { + // chat-error: Pre-stream failures (before stream-start) + // Distinct from stream-error which occurs during AI SDK streaming + type: "chat-error"; + id: string; // 
Display ID for UI/React keys + historyId: string; // Original MuxMessage ID for history operations + error: string; // Error message + errorType: StreamErrorType; // Error type/category + historySequence: number; // Global ordering across all messages + timestamp?: number; + } | { type: "history-hidden"; id: string; // Display ID for UI/React keys diff --git a/src/node/services/agentSession.ts b/src/node/services/agentSession.ts index b15d13cc9d..0af95ae89b 100644 --- a/src/node/services/agentSession.ts +++ b/src/node/services/agentSession.ts @@ -13,12 +13,16 @@ import type { RuntimeConfig } from "@/common/types/runtime"; import { DEFAULT_RUNTIME_CONFIG } from "@/common/constants/workspace"; import type { WorkspaceChatMessage, + ChatErrorMessage, StreamErrorMessage, SendMessageOptions, ImagePart, } from "@/common/orpc/types"; import type { SendMessageError } from "@/common/types/errors"; -import { createUnknownSendMessageError } from "@/node/services/utils/sendMessageError"; +import { + createUnknownSendMessageError, + formatSendMessageError, +} from "@/node/services/utils/sendMessageError"; import type { Result } from "@/common/types/result"; import { Ok, Err } from "@/common/types/result"; import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy"; @@ -427,9 +431,11 @@ export class AgentSession { } if (!options?.model || options.model.trim().length === 0) { - return Err( - createUnknownSendMessageError("No model specified. Please select a model using /model.") + const error = createUnknownSendMessageError( + "No model specified. Please select a model using /model." 
); + this.emitChatError(error); + return Err(error); } return this.streamWithHistory(options.model, options); @@ -483,12 +489,16 @@ ): Promise> { const commitResult = await this.partialService.commitToHistory(this.workspaceId); if (!commitResult.success) { - return Err(createUnknownSendMessageError(commitResult.error)); + const error = createUnknownSendMessageError(commitResult.error); + this.emitChatError(error); + return Err(error); } const historyResult = await this.historyService.getHistory(this.workspaceId); if (!historyResult.success) { - return Err(createUnknownSendMessageError(historyResult.error)); + const error = createUnknownSendMessageError(historyResult.error); + this.emitChatError(error); + return Err(error); } // Enforce thinking policy for the specified model (single source of truth) @@ -497,7 +507,7 @@ ? enforceThinkingPolicy(modelString, options.thinkingLevel) : undefined; - return this.aiService.streamMessage( + const result = await this.aiService.streamMessage( historyResult.data, this.workspaceId, modelString, @@ -509,6 +519,40 @@ options?.providerOptions, options?.mode ); + + // If stream failed to start, emit a chat-error so the user sees the error in the chat UI + // (not just a toast which might be missed). The chat-error surfaces in the conversation + // as an error message rendered by ChatErrorMessage. + if (!result.success) { + this.emitChatError(result.error); + } + + return result; + } + + /** + * Emit a chat-error event for pre-stream failures. + * Distinct from stream-error (AI SDK errors during streaming). + * + * chat-error is for errors that occur BEFORE streaming starts: + * - Invalid model format + * - Missing API key + * - Unsupported provider + * - etc. + * + * This ensures errors are visible in the chat UI, not just as a toast + * that might be dismissed or missed.
+ */ + private emitChatError(error: SendMessageError): void { + const { message, errorType } = formatSendMessageError(error); + const chatError: ChatErrorMessage = { + type: "chat-error", + // Use a synthetic messageId since no assistant message was created + messageId: `error-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`, + error: message, + errorType, + }; + this.emitChatEvent(chatError); } private attachAiListeners(): void { diff --git a/tests/ipc/sendMessage.errors.test.ts b/tests/ipc/sendMessage.errors.test.ts index 256a240a09..955c49165a 100644 --- a/tests/ipc/sendMessage.errors.test.ts +++ b/tests/ipc/sendMessage.errors.test.ts @@ -96,6 +96,30 @@ describeIntegration("sendMessage error handling tests", () => { 15000 ); + test.concurrent( + "should emit chat-error when model validation fails early", + async () => { + await withSharedWorkspace("openai", async ({ env, workspaceId, collector }) => { + // Send a message with an invalid model format (causes early validation failure) + const result = await sendMessage(env, workspaceId, "Hello", { + model: "invalid-model-without-provider", + }); + + // IPC call fails immediately (pre-stream validation error) + expect(result.success).toBe(false); + + // Should emit chat-error (not stream-error) since this is a pre-stream failure. + // This is important because the user message is displayed before stream starts. + const errorEvent = await collector.waitForEvent("chat-error", 3000); + expect(errorEvent).toBeDefined(); + if (errorEvent?.type === "chat-error") { + expect(errorEvent.error).toBeDefined(); + } + }); + }, + 15000 + ); + test.concurrent( "should fail with non-existent model", async () => {