4 changes: 2 additions & 2 deletions src/browser/components/AIView.tsx
@@ -133,7 +133,7 @@ const AIViewInner: React.FC<AIViewProps> = ({
}

for (const message of workspaceState.messages) {
- if (message.type !== "stream-error") {
+ if (message.type !== "stream-error" && message.type !== "chat-error") {
continue;
}
if (message.errorType !== "model_not_found") {
@@ -143,7 +143,7 @@ const AIViewInner: React.FC<AIViewProps> = ({
continue;
}
handledModelErrorsRef.current.add(message.id);
- if (message.model) {
+ if (message.type === "stream-error" && message.model) {
evictModelFromLRU(message.model);
}
}
4 changes: 3 additions & 1 deletion src/browser/components/Messages/MessageRenderer.tsx
@@ -5,7 +5,7 @@ import { UserMessage } from "./UserMessage";
import { AssistantMessage } from "./AssistantMessage";
import { ToolMessage } from "./ToolMessage";
import { ReasoningMessage } from "./ReasoningMessage";
- import { StreamErrorMessage } from "./StreamErrorMessage";
+ import { StreamErrorMessage, ChatErrorMessage } from "./StreamErrorMessage";
import { HistoryHiddenMessage } from "./HistoryHiddenMessage";
import { InitMessage } from "./InitMessage";

@@ -56,6 +56,8 @@ export const MessageRenderer = React.memo<MessageRendererProps>(
return <ReasoningMessage message={message} className={className} />;
case "stream-error":
return <StreamErrorMessage message={message} className={className} />;
case "chat-error":
return <ChatErrorMessage message={message} className={className} />;
case "history-hidden":
return <HistoryHiddenMessage message={message} className={className} />;
case "workspace-init":
26 changes: 26 additions & 0 deletions src/browser/components/Messages/StreamErrorMessage.tsx
@@ -31,3 +31,29 @@ export const StreamErrorMessage: React.FC<StreamErrorMessageProps> = ({ message,
</div>
);
};
+
+ /**
+ * ChatErrorMessage - displays pre-stream errors (before AI SDK streaming starts).
+ * These are errors like invalid model, missing API key, unsupported provider, etc.
+ */
+ interface ChatErrorMessageProps {
+ message: DisplayedMessage & { type: "chat-error" };
+ className?: string;
+ }
+
+ export const ChatErrorMessage: React.FC<ChatErrorMessageProps> = ({ message, className }) => {
+ return (
+ <div className={cn("bg-error-bg border border-error rounded px-5 py-4 my-3", className)}>
+ <div className="font-primary text-error mb-3 flex items-center gap-2.5 text-[13px] font-semibold tracking-wide">
+ <span className="text-base leading-none">●</span>
+ <span>Error</span>
+ <span className="text-secondary rounded-sm bg-black/40 px-2 py-0.5 font-mono text-[10px] tracking-wider uppercase">
+ {message.errorType}
+ </span>
+ </div>
+ <div className="text-foreground font-mono text-[13px] leading-relaxed break-words">
+ {message.error}
+ </div>
+ </div>
+ );
+ };
11 changes: 11 additions & 0 deletions src/browser/stores/WorkspaceStore.ts
@@ -13,6 +13,7 @@ import { useSyncExternalStore } from "react";
import {
isCaughtUpMessage,
isStreamError,
+ isChatError,
isDeleteMessage,
isMuxMessage,
isQueuedMessageChanged,
@@ -998,6 +999,16 @@
return;
}

+ // chat-error: pre-stream failures (invalid model, missing API key, etc.)
+ // These are NOT auto-retryable - they require user action
+ // Don't increment retry counter, but still show in UI and allow manual retry
+ if (isChatError(data)) {
+ aggregator.handleChatError(data);
+ this.states.bump(workspaceId);
+ this.dispatchResumeCheck(workspaceId);
+ return;
+ }
+
if (isDeleteMessage(data)) {
aggregator.handleDeleteMessage(data);
this.states.bump(workspaceId);
41 changes: 40 additions & 1 deletion src/browser/utils/messages/StreamingMessageAggregator.ts
@@ -21,7 +21,12 @@ import type {
import type { LanguageModelV2Usage } from "@ai-sdk/provider";
import type { TodoItem, StatusSetToolResult } from "@/common/types/tools";

- import type { WorkspaceChatMessage, StreamErrorMessage, DeleteMessage } from "@/common/orpc/types";
+ import type {
+ WorkspaceChatMessage,
+ StreamErrorMessage,
+ ChatErrorMessage,
+ DeleteMessage,
+ } from "@/common/orpc/types";
import { isInitStart, isInitOutput, isInitEnd, isMuxMessage } from "@/common/orpc/types";
import type {
DynamicToolPart,
@@ -589,6 +594,40 @@
}
}

+ /**
+ * Handle pre-stream chat errors (before stream-start).
+ * These are distinct from stream-error (AI SDK errors during streaming).
+ *
+ * chat-error occurs when:
+ * - Model validation fails (invalid format, non-existent model)
+ * - API key is missing
+ * - Provider is not supported
+ * - etc.
+ *
+ * Creates a synthetic error message since there's no active stream.
+ */
+ handleChatError(data: ChatErrorMessage): void {
+ // Get the highest historySequence from existing messages so this appears at the end
+ const maxSequence = Math.max(
+ 0,
+ ...Array.from(this.messages.values()).map((m) => m.metadata?.historySequence ?? 0)
+ );
+ const errorMessage: MuxMessage = {
+ id: data.messageId,
+ role: "assistant",
+ parts: [],
+ metadata: {
+ partial: true,
+ error: data.error,
+ errorType: data.errorType,
+ timestamp: Date.now(),
Review comment on lines +619 to +623 (P1): Surface chat-error messages instead of stream-error

In handleChatError the pre-stream error is stored as a plain MuxMessage with only error metadata and no chat-error type, so getDisplayedMessages will still convert it into a stream-error entry. That means the new chat-error UI/logic never triggers: pre-stream failures such as unsupported providers or missing API keys are classified as retryable stream errors, allowing auto-resume to keep retrying instead of forcing user action.

+ historySequence: maxSequence + 1,
+ },
+ };
+ this.messages.set(data.messageId, errorMessage);
+ this.invalidateCache();
+ }
+
handleToolCallStart(data: ToolCallStartEvent): void {
const message = this.messages.get(data.messageId);
if (!message) return;
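
One way the review comment above could be resolved, sketched under stated assumptions rather than taken from the PR: persist an explicit kind discriminator on the synthetic message and branch on it when building display messages. The errorKind field and the toDisplayedError helper are hypothetical, and the simplified types stand in for the project's MuxMessage metadata and DisplayedMessage.

type ErrorKind = "chat-error" | "stream-error";

interface SyntheticErrorMetadata {
  error: string;
  errorType: string;
  errorKind: ErrorKind; // assumption: written by handleChatError alongside error/errorType
  historySequence: number;
}

// Branch on the discriminator instead of defaulting every error message to
// "stream-error", so pre-stream failures skip the auto-retry path.
function toDisplayedError(id: string, meta: SyntheticErrorMetadata) {
  return {
    type: meta.errorKind,
    id,
    historyId: id,
    error: meta.error,
    errorType: meta.errorType,
    historySequence: meta.historySequence,
  };
}
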
1 change: 1 addition & 0 deletions src/browser/utils/messages/messageUtils.ts
@@ -11,6 +11,7 @@ export function shouldShowInterruptedBarrier(msg: DisplayedMessage): boolean {
if (
msg.type === "user" ||
msg.type === "stream-error" ||
msg.type === "chat-error" ||
msg.type === "history-hidden" ||
msg.type === "workspace-init"
)
8 changes: 8 additions & 0 deletions src/browser/utils/messages/retryEligibility.ts
@@ -92,6 +92,7 @@ export function hasInterruptedStream(

return (
lastMessage.type === "stream-error" || // Stream errored out (show UI for ALL error types)
lastMessage.type === "chat-error" || // Pre-stream error (show UI so user can retry after fixing)
lastMessage.type === "user" || // No response received yet (app restart during slow model)
(lastMessage.type === "assistant" && lastMessage.isPartial === true) ||
(lastMessage.type === "tool" && lastMessage.isPartial === true) ||
@@ -122,6 +123,13 @@
// If the last message is a non-retryable error, don't auto-retry
// (but manual retry is still available via hasInterruptedStream)
const lastMessage = messages[messages.length - 1];
+
+ // chat-error is NEVER auto-retryable - always requires user action
+ // (fixing model selection, adding API key, etc.)
+ if (lastMessage.type === "chat-error") {
+ return false;
+ }
+
if (lastMessage.type === "stream-error") {
// Debug flag: force all errors to be retryable
if (isForceAllRetryableEnabled()) {
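
Taken together, the two functions encode one rule: a chat-error still surfaces retry UI, but never auto-retries. A condensed sketch of that decision, using simplified stand-ins for the real DisplayedMessage shapes and for the errorType inspection that isEligibleForAutoRetry actually performs:

type LastMessage =
  | { type: "chat-error" }
  | { type: "stream-error"; retryableErrorType: boolean }
  | { type: "assistant"; isPartial: boolean };

function eligibleForAutoRetry(last: LastMessage): boolean {
  if (last.type === "chat-error") return false; // always needs user action first
  if (last.type === "stream-error") return last.retryableErrorType;
  return last.isPartial; // interrupted assistant output may be resumed
}
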
1 change: 1 addition & 0 deletions src/common/orpc/schemas.ts
@@ -82,6 +82,7 @@ export {
ReasoningEndEventSchema,
RestoreToInputEventSchema,
SendMessageOptionsSchema,
+ ChatErrorMessageSchema,
StreamAbortEventSchema,
StreamDeltaEventSchema,
StreamEndEventSchema,
18 changes: 18 additions & 0 deletions src/common/orpc/schemas/stream.ts
@@ -22,6 +22,23 @@ export const StreamErrorMessageSchema = z.object({
errorType: StreamErrorTypeSchema,
});

+ /**
+ * Chat error message - for errors that occur BEFORE streaming starts.
+ * Distinct from StreamErrorMessage (AI SDK stream errors that happen during streaming).
+ *
+ * These errors are NOT auto-retryable - they require user action:
+ * - Invalid model format
+ * - Missing API key
+ * - Unsupported provider
+ * - etc.
+ */
+ export const ChatErrorMessageSchema = z.object({
+ type: z.literal("chat-error"),
+ messageId: z.string(),
+ error: z.string(),
+ errorType: StreamErrorTypeSchema,
+ });
+
export const DeleteMessageSchema = z.object({
type: z.literal("delete"),
historySequences: z.array(z.number()),
@@ -260,6 +277,7 @@ export const WorkspaceChatMessageSchema = z.discriminatedUnion("type", [
// Stream lifecycle events
CaughtUpMessageSchema,
StreamErrorMessageSchema,
+ ChatErrorMessageSchema,
DeleteMessageSchema,
StreamStartEventSchema,
StreamDeltaEventSchema,
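
To illustrate how the discriminated union routes a pre-stream failure, here is a self-contained zod sketch. The schemas are abbreviated stand-ins for the ones in this diff, and errorType is loosened to z.string() because StreamErrorTypeSchema's variants are not shown here; the "unknown" value is likewise an assumption.

import { z } from "zod";

const ChatError = z.object({
  type: z.literal("chat-error"),
  messageId: z.string(),
  error: z.string(),
  errorType: z.string(), // the real schema uses StreamErrorTypeSchema
});

const StreamError = z.object({
  type: z.literal("stream-error"),
  messageId: z.string(),
  error: z.string(),
  errorType: z.string(),
});

const ChatMessage = z.discriminatedUnion("type", [ChatError, StreamError]);

const msg = ChatMessage.parse({
  type: "chat-error",
  messageId: "error-123",
  error: "No model specified. Please select a model using /model.",
  errorType: "unknown",
});
// msg.type is narrowed to "chat-error", which is what type guards like
// isChatError rely on downstream.
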
5 changes: 5 additions & 0 deletions src/common/orpc/types.ts
@@ -25,6 +25,7 @@ export type ImagePart = z.infer<typeof schemas.ImagePartSchema>;
export type WorkspaceChatMessage = z.infer<typeof schemas.WorkspaceChatMessageSchema>;
export type CaughtUpMessage = z.infer<typeof schemas.CaughtUpMessageSchema>;
export type StreamErrorMessage = z.infer<typeof schemas.StreamErrorMessageSchema>;
+ export type ChatErrorMessage = z.infer<typeof schemas.ChatErrorMessageSchema>;
export type DeleteMessage = z.infer<typeof schemas.DeleteMessageSchema>;
export type WorkspaceInitEvent = z.infer<typeof schemas.WorkspaceInitEventSchema>;
export type UpdateStatus = z.infer<typeof schemas.UpdateStatusSchema>;
@@ -43,6 +44,10 @@ export function isStreamError(msg: WorkspaceChatMessage): msg is StreamErrorMessage {
return (msg as { type?: string }).type === "stream-error";
}

+ export function isChatError(msg: WorkspaceChatMessage): msg is ChatErrorMessage {
+ return (msg as { type?: string }).type === "chat-error";
+ }
+
export function isDeleteMessage(msg: WorkspaceChatMessage): msg is DeleteMessage {
return (msg as { type?: string }).type === "delete";
}
11 changes: 11 additions & 0 deletions src/common/types/message.ts
@@ -222,6 +222,17 @@ export type DisplayedMessage =
model?: string;
errorCount?: number; // Number of consecutive identical errors merged into this message
}
+ | {
+ // chat-error: Pre-stream failures (before stream-start)
+ // Distinct from stream-error which occurs during AI SDK streaming
+ type: "chat-error";
+ id: string; // Display ID for UI/React keys
+ historyId: string; // Original MuxMessage ID for history operations
+ error: string; // Error message
+ errorType: StreamErrorType; // Error type/category
+ historySequence: number; // Global ordering across all messages
+ timestamp?: number;
+ }
| {
type: "history-hidden";
id: string; // Display ID for UI/React keys
56 changes: 50 additions & 6 deletions src/node/services/agentSession.ts
@@ -13,12 +13,16 @@ import type { RuntimeConfig } from "@/common/types/runtime";
import { DEFAULT_RUNTIME_CONFIG } from "@/common/constants/workspace";
import type {
WorkspaceChatMessage,
+ ChatErrorMessage,
StreamErrorMessage,
SendMessageOptions,
ImagePart,
} from "@/common/orpc/types";
import type { SendMessageError } from "@/common/types/errors";
- import { createUnknownSendMessageError } from "@/node/services/utils/sendMessageError";
+ import {
+ createUnknownSendMessageError,
+ formatSendMessageError,
+ } from "@/node/services/utils/sendMessageError";
import type { Result } from "@/common/types/result";
import { Ok, Err } from "@/common/types/result";
import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy";
@@ -427,9 +431,11 @@
}

if (!options?.model || options.model.trim().length === 0) {
- return Err(
- createUnknownSendMessageError("No model specified. Please select a model using /model.")
+ const error = createUnknownSendMessageError(
+ "No model specified. Please select a model using /model."
);
+ this.emitChatError(error);
+ return Err(error);
}

return this.streamWithHistory(options.model, options);
@@ -483,12 +489,16 @@
): Promise<Result<void, SendMessageError>> {
const commitResult = await this.partialService.commitToHistory(this.workspaceId);
if (!commitResult.success) {
- return Err(createUnknownSendMessageError(commitResult.error));
+ const error = createUnknownSendMessageError(commitResult.error);
+ this.emitChatError(error);
+ return Err(error);
}

const historyResult = await this.historyService.getHistory(this.workspaceId);
if (!historyResult.success) {
- return Err(createUnknownSendMessageError(historyResult.error));
+ const error = createUnknownSendMessageError(historyResult.error);
+ this.emitChatError(error);
+ return Err(error);
}

// Enforce thinking policy for the specified model (single source of truth)
@@ -497,7 +507,7 @@
? enforceThinkingPolicy(modelString, options.thinkingLevel)
: undefined;

- return this.aiService.streamMessage(
+ const result = await this.aiService.streamMessage(
historyResult.data,
this.workspaceId,
modelString,
@@ -509,6 +519,40 @@
options?.providerOptions,
options?.mode
);
+
+ // If the stream failed to start, emit a chat-error so the user sees the
+ // failure in the chat UI as an error message in the conversation, not just
+ // a toast that might be dismissed or missed.
+ if (!result.success) {
+ this.emitChatError(result.error);
+ }
+
+ return result;
}
+
+ /**
+ * Emit a chat-error event for pre-stream failures.
+ * Distinct from stream-error (AI SDK errors during streaming).
+ *
+ * chat-error is for errors that occur BEFORE streaming starts:
+ * - Invalid model format
+ * - Missing API key
+ * - Unsupported provider
+ * - etc.
+ *
+ * This ensures errors are visible in the chat UI, not just as a toast
+ * that might be dismissed or missed.
+ */
+ private emitChatError(error: SendMessageError): void {
+ const { message, errorType } = formatSendMessageError(error);
+ const chatError: ChatErrorMessage = {
+ type: "chat-error",
+ // Use a synthetic messageId since no assistant message was created
+ messageId: `error-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
+ error: message,
+ errorType,
+ };
+ this.emitChatEvent(chatError);
+ }

private attachAiListeners(): void {
24 changes: 24 additions & 0 deletions tests/ipc/sendMessage.errors.test.ts
@@ -96,6 +96,30 @@ describeIntegration("sendMessage error handling tests", () => {
15000
);

+ test.concurrent(
+ "should emit chat-error when model validation fails early",
+ async () => {
+ await withSharedWorkspace("openai", async ({ env, workspaceId, collector }) => {
+ // Send a message with an invalid model format (causes early validation failure)
+ const result = await sendMessage(env, workspaceId, "Hello", {
+ model: "invalid-model-without-provider",
+ });
+
+ // IPC call fails immediately (pre-stream validation error)
+ expect(result.success).toBe(false);
+
+ // Should emit chat-error (not stream-error) since this is a pre-stream failure.
+ // This matters because the user message is already displayed before the stream starts.
+ const errorEvent = await collector.waitForEvent("chat-error", 3000);
+ expect(errorEvent).toBeDefined();
+ if (errorEvent?.type === "chat-error") {
+ expect(errorEvent.error).toBeDefined();
+ }
+ });
+ },
+ 15000
+ );
+
test.concurrent(
"should fail with non-existent model",
async () => {