diff --git a/cockpit/langgraph/streaming/python/langgraph.json b/cockpit/langgraph/streaming/python/langgraph.json index 29997e521..a76b91047 100644 --- a/cockpit/langgraph/streaming/python/langgraph.json +++ b/cockpit/langgraph/streaming/python/langgraph.json @@ -1,6 +1,17 @@ { "graphs": { - "streaming": "./src/graph.py:graph" + "streaming": "./src/graph.py:graph", + "generative_ui": "./src/chat_graphs.py:generative_ui", + "c-messages": "./src/chat_graphs.py:c_messages", + "c-input": "./src/chat_graphs.py:c_input", + "c-debug": "./src/chat_graphs.py:c_debug", + "c-interrupts": "./src/chat_graphs.py:c_interrupts", + "c-theming": "./src/chat_graphs.py:c_theming", + "c-threads": "./src/chat_graphs.py:c_threads", + "c-timeline": "./src/chat_graphs.py:c_timeline", + "c-tool-calls": "./src/chat_graphs.py:c_tool_calls", + "c-subagents": "./src/chat_graphs.py:c_subagents", + "a2ui_form": "./src/a2ui_graph.py:graph" }, "dependencies": [ "." diff --git a/cockpit/langgraph/streaming/python/prompts/debug.md b/cockpit/langgraph/streaming/python/prompts/debug.md new file mode 100644 index 000000000..368a5d96b --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/debug.md @@ -0,0 +1,13 @@ +# Chat Debug Assistant + +You are an assistant that demonstrates the ChatDebugComponent for +development inspection. + +Your responses pass through a multi-step pipeline (generate -> process -> +summarize), creating multiple state transitions that are visible in the +debug panel. Each step produces different state data that developers can +inspect using the timeline, state inspector, and diff viewer. + +Respond helpfully while noting that your response will be processed +through multiple graph nodes, each creating a checkpoint visible in +the debug panel. 
diff --git a/cockpit/langgraph/streaming/python/prompts/generative-ui.md b/cockpit/langgraph/streaming/python/prompts/generative-ui.md new file mode 100644 index 000000000..98c06c985 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/generative-ui.md @@ -0,0 +1,46 @@ +# Generative UI Assistant + +You are a generative-UI assistant. You MUST respond with **raw JSON only** — no markdown, no code fences, no explanation text. Your entire response must be a single valid JSON object following the Spec format below. + +## Spec Schema + +A **Spec** is a JSON object with two required top-level keys: + +``` +{ + "elements": { [key: string]: Element }, + "rootKey": string +} +``` + +An **Element** has: + +``` +{ + "type": string, // component type name + "props": { ... }, // component-specific properties + "children?": string[] // ordered list of element keys (references into `elements`) +} +``` + +## Available Component Types + +| Type | Props | Children | +|-----------------|--------------------------------------------------------------|----------| +| `container` | *(none)* | Yes | +| `weather_card` | `city` (string), `temperature` (number), `condition` (string)| No | +| `stat_card` | `label` (string), `value` (string) | No | + +## Rules + +1. Respond ONLY with valid JSON. No markdown. No code fences. No surrounding text. +2. Every element referenced in a `children` array must exist as a key in `elements`. +3. `rootKey` must reference a key that exists in `elements`. +4. Use `container` to group multiple cards together. +5. Choose component types that best match the user's request. 
+ +## Example Response + +If the user asks "What's the weather in Chicago and New York?", respond exactly like: + +{"elements":{"root":{"type":"container","props":{},"children":["chicago","nyc"]},"chicago":{"type":"weather_card","props":{"city":"Chicago","temperature":45,"condition":"Partly Cloudy"}},"nyc":{"type":"weather_card","props":{"city":"New York","temperature":52,"condition":"Sunny"}}},"rootKey":"root"} diff --git a/cockpit/langgraph/streaming/python/prompts/input.md b/cockpit/langgraph/streaming/python/prompts/input.md new file mode 100644 index 000000000..ed2b23926 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/input.md @@ -0,0 +1,11 @@ +# Chat Input Assistant + +You are an assistant that demonstrates the ChatInputComponent from @cacheplane/chat. + +Echo back what the user says, and explain the input features being demonstrated: +- Custom placeholder text +- Keyboard handling (Enter to send, Shift+Enter for newline) +- Disabled state while the agent is responding +- Loading indicator integration + +Keep responses concise to showcase the streaming and input state transitions. diff --git a/cockpit/langgraph/streaming/python/prompts/interrupts.md b/cockpit/langgraph/streaming/python/prompts/interrupts.md new file mode 100644 index 000000000..aa92596a4 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/interrupts.md @@ -0,0 +1,12 @@ +# Chat Interrupts Assistant + +You are an assistant that demonstrates human-in-the-loop approval gates +using LangGraph interrupts. + +Every response you generate will be paused at an approval gate before +being finalized. This demonstrates the interrupt() primitive that enables +human oversight of AI actions. + +Explain to the user that after you draft a response, they will see an +approval panel where they can approve or reject the response before it +is committed to the conversation. 
diff --git a/cockpit/langgraph/streaming/python/prompts/messages.md b/cockpit/langgraph/streaming/python/prompts/messages.md new file mode 100644 index 000000000..5553c7818 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/messages.md @@ -0,0 +1,12 @@ +# Chat Messages Assistant + +You are an assistant that demonstrates the chat message primitives from @cacheplane/chat. + +Your role is to showcase different message types and rendering styles. +Use varied response formats including short answers, longer explanations, +bulleted lists, and code snippets to demonstrate how ChatMessagesComponent +renders different content. + +When greeting the user, explain that this demo showcases ChatMessagesComponent, +ChatInputComponent, and ChatTypingIndicatorComponent working together as +individual primitives rather than the composed ChatComponent. diff --git a/cockpit/langgraph/streaming/python/prompts/subagents.md b/cockpit/langgraph/streaming/python/prompts/subagents.md new file mode 100644 index 000000000..328d196ef --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/subagents.md @@ -0,0 +1,12 @@ +# Chat Subagents Orchestrator + +You are the orchestrator in a multi-agent system. You coordinate specialized +subagents to handle user requests: + +- **Research Agent**: Gathers background information and context +- **Analysis Agent**: Analyzes findings and identifies patterns +- **Summary Agent**: Produces a concise summary of results + +When the user asks a question, acknowledge their request and explain that +you are delegating work to your specialized subagents. Each subagent will +process the task in sequence and their progress will be visible in the UI. 
diff --git a/cockpit/langgraph/streaming/python/prompts/theming.md b/cockpit/langgraph/streaming/python/prompts/theming.md new file mode 100644 index 000000000..88613665f --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/theming.md @@ -0,0 +1,13 @@ +# Chat Theming Assistant + +You are an assistant that demonstrates chat theming and CSS custom +property customization in @cacheplane/chat. + +The chat UI supports extensive theming via CSS custom properties like +`--chat-bg`, `--chat-text`, `--chat-accent`, `--chat-surface`, and more. +These can be swapped at runtime using CHAT_THEME_STYLES or by setting +CSS variables on a parent element. + +Explain the theming system when asked, and demonstrate how different +themes change the appearance of the chat interface. The sidebar contains +theme picker buttons that swap themes in real time. diff --git a/cockpit/langgraph/streaming/python/prompts/threads.md b/cockpit/langgraph/streaming/python/prompts/threads.md new file mode 100644 index 000000000..678d2ad5e --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/threads.md @@ -0,0 +1,11 @@ +# Chat Threads Assistant + +You are an assistant that demonstrates multi-thread conversation management. + +Each conversation thread maintains its own isolated message history. +Users can create new threads, switch between existing threads, and +each thread preserves its full conversation context independently. + +When the user starts a conversation, acknowledge the current thread +and explain that they can create new threads or switch between them +using the thread list in the sidebar. 
diff --git a/cockpit/langgraph/streaming/python/prompts/timeline.md b/cockpit/langgraph/streaming/python/prompts/timeline.md new file mode 100644 index 000000000..b4ff48874 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/timeline.md @@ -0,0 +1,11 @@ +# Chat Timeline Assistant + +You are an assistant that demonstrates conversation timeline and +checkpoint navigation using the Angular agent() ref. + +Each message exchange creates a checkpoint in the conversation timeline. +Users can navigate backward and forward through these checkpoints using +the timeline slider, and even branch from a previous checkpoint to +explore alternative conversation paths. + +Respond helpfully to demonstrate how checkpoints accumulate over time. diff --git a/cockpit/langgraph/streaming/python/prompts/tool-calls.md b/cockpit/langgraph/streaming/python/prompts/tool-calls.md new file mode 100644 index 000000000..bd84e06d7 --- /dev/null +++ b/cockpit/langgraph/streaming/python/prompts/tool-calls.md @@ -0,0 +1,13 @@ +# Chat Tool Calls Assistant + +You are an assistant with access to search, calculator, and weather tools. +Use these tools proactively to answer user questions. + +Available tools: +- **search**: Search the web for information on any topic +- **calculator**: Evaluate mathematical expressions +- **weather**: Get current weather for any city + +When the user asks a question, use the appropriate tool(s) to gather +information before responding. Combine results from multiple tools +when needed. Always explain which tools you used and why. diff --git a/cockpit/langgraph/streaming/python/src/a2ui_graph.py b/cockpit/langgraph/streaming/python/src/a2ui_graph.py new file mode 100644 index 000000000..f66894a8e --- /dev/null +++ b/cockpit/langgraph/streaming/python/src/a2ui_graph.py @@ -0,0 +1,99 @@ +""" +A2UI Contact Form Graph + +Demonstrates the A2UI (Agent-to-UI) protocol by streaming JSONL that +builds an interactive contact form on the Angular frontend. 
+""" + +import json +from langgraph.graph import StateGraph, MessagesState, END +from langchain_core.messages import AIMessage + +A2UI_PREFIX = "---a2ui_JSON---" + +CONTACT_FORM_JSONL = A2UI_PREFIX + "\n" + "\n".join([ + json.dumps({"type": "createSurface", "surfaceId": "contact", "catalogId": "basic"}), + json.dumps({"type": "updateDataModel", "surfaceId": "contact", "value": { + "name": "", "email": "", "department": "Engineering", "consent": False, + }}), + json.dumps({"type": "updateComponents", "surfaceId": "contact", "components": [ + {"id": "root", "component": "Column", "children": ["card"]}, + {"id": "card", "component": "Card", "title": "Contact Us", "children": [ + "name_field", "email_field", "dept_picker", "consent_check", "divider", "submit_btn", + ]}, + {"id": "name_field", "component": "TextField", + "label": "Name", "value": {"path": "/name"}, "placeholder": "Your full name", + "_bindings": {"value": "/name"}, + "checks": [ + {"condition": {"call": "required", "args": {"value": {"path": "/name"}}}, + "message": "Name is required"}, + ]}, + {"id": "email_field", "component": "TextField", + "label": "Email", "value": {"path": "/email"}, "placeholder": "you@company.com", + "_bindings": {"value": "/email"}, + "checks": [ + {"condition": {"call": "required", "args": {"value": {"path": "/email"}}}, + "message": "Email is required"}, + {"condition": {"call": "email", "args": {"value": {"path": "/email"}}}, + "message": "Must be a valid email address"}, + ]}, + {"id": "dept_picker", "component": "ChoicePicker", + "label": "Department", + "options": ["Engineering", "Sales", "Support", "Marketing"], + "selected": {"path": "/department"}, + "_bindings": {"selected": "/department"}}, + {"id": "consent_check", "component": "CheckBox", + "label": "I agree to be contacted", "checked": {"path": "/consent"}, + "_bindings": {"checked": "/consent"}}, + {"id": "divider", "component": "Divider"}, + {"id": "submit_btn", "component": "Button", + "label": "Submit", + 
"checks": [ + {"condition": {"call": "and", "args": {"values": [ + {"call": "required", "args": {"value": {"path": "/name"}}}, + {"call": "email", "args": {"value": {"path": "/email"}}}, + {"path": "/consent"}, + ]}}, + "message": "Complete all required fields and agree to be contacted"}, + ], + "action": {"event": {"name": "formSubmit", "context": {"formId": "contact"}}}}, + ]}), +]) + + +def build_a2ui_graph(): + """ + Two-node graph: + - create_form: emits the A2UI contact form surface + - handle_event: responds to form submission events + """ + + async def create_form(state: MessagesState) -> dict: + last = state["messages"][-1] + + # If this is an a2ui_event, route to event handling + try: + payload = json.loads(last.content) + if isinstance(payload, dict) and payload.get("type") == "a2ui_event": + return await handle_event(state, payload) + except (json.JSONDecodeError, AttributeError): + pass + + # First message — emit the contact form + return {"messages": [AIMessage(content=CONTACT_FORM_JSONL)]} + + async def handle_event(state: MessagesState, payload: dict) -> dict: + name = payload.get("context", {}).get("formId", "unknown") + return {"messages": [AIMessage( + content=f"Thanks for submitting the **{name}** form! We'll be in touch soon.", + )]} + + graph = StateGraph(MessagesState) + graph.add_node("create_form", create_form) + graph.set_entry_point("create_form") + graph.add_edge("create_form", END) + + return graph.compile() + + +graph = build_a2ui_graph() diff --git a/cockpit/langgraph/streaming/python/src/chat_graphs.py b/cockpit/langgraph/streaming/python/src/chat_graphs.py new file mode 100644 index 000000000..2b2ee7300 --- /dev/null +++ b/cockpit/langgraph/streaming/python/src/chat_graphs.py @@ -0,0 +1,47 @@ +""" +Chat example graphs — consolidated into the streaming deployment. + +Each chat cockpit example (messages, input, debug, generative-ui, etc.) 
uses +the same graph architecture: a single-node StateGraph that prepends a system +prompt and calls the LLM. They differ only in the prompt file. + +Registering them all here avoids separate LangGraph Cloud deployments while +keeping each example addressable by its own assistant ID. +""" + +from pathlib import Path +from langgraph.graph import StateGraph, MessagesState, END +from langchain_openai import ChatOpenAI +from langchain_core.messages import SystemMessage + +PROMPTS_DIR = Path(__file__).parent.parent / "prompts" + + +def _build_prompt_graph(prompt_file: str): + """Factory: creates a compiled graph that uses the given prompt file.""" + llm = ChatOpenAI(model="gpt-5-mini", streaming=True) + + async def generate(state: MessagesState) -> dict: + system_prompt = (PROMPTS_DIR / prompt_file).read_text() + messages = [SystemMessage(content=system_prompt)] + state["messages"] + response = await llm.ainvoke(messages) + return {"messages": [response]} + + graph = StateGraph(MessagesState) + graph.add_node("generate", generate) + graph.set_entry_point("generate") + graph.add_edge("generate", END) + return graph.compile() + + +# Each graph instance is referenced by langgraph.json +c_messages = _build_prompt_graph("messages.md") +c_input = _build_prompt_graph("input.md") +c_debug = _build_prompt_graph("debug.md") +c_interrupts = _build_prompt_graph("interrupts.md") +c_theming = _build_prompt_graph("theming.md") +c_threads = _build_prompt_graph("threads.md") +c_timeline = _build_prompt_graph("timeline.md") +c_tool_calls = _build_prompt_graph("tool-calls.md") +c_subagents = _build_prompt_graph("subagents.md") +generative_ui = _build_prompt_graph("generative-ui.md") diff --git a/libs/agent/src/lib/agent.fn.spec.ts b/libs/agent/src/lib/agent.fn.spec.ts index 3fe1834f1..e3eb98efa 100644 --- a/libs/agent/src/lib/agent.fn.spec.ts +++ b/libs/agent/src/lib/agent.fn.spec.ts @@ -147,7 +147,7 @@ describe('agent', () => { expect(ref.messages()).toHaveLength(1); 
threadId.set('thread-2'); - await new Promise(r => setTimeout(r, 0)); + await new Promise(r => setTimeout(r, 30)); expect(ref.hasValue()).toBe(false); expect(ref.status()).toBe(ResourceStatus.Idle); diff --git a/libs/agent/src/lib/agent.fn.ts b/libs/agent/src/lib/agent.fn.ts index 2b1b0229e..a5b496b73 100644 --- a/libs/agent/src/lib/agent.fn.ts +++ b/libs/agent/src/lib/agent.fn.ts @@ -123,8 +123,9 @@ export function agent< destroy$: destroy$.asObservable(), }); - // Throttle helper - const ms = typeof options.throttle === 'number' ? options.throttle : 0; + // Throttle helper — default 16ms (~60fps) to batch SSE token updates into + // at most one signal update per frame, preventing change detection storms. + const ms = typeof options.throttle === 'number' ? options.throttle : 16; const maybeThrottle = (obs: BehaviorSubject) => ms > 0 ? obs.pipe(throttleTime(ms, asyncScheduler, { leading: true, trailing: true })) diff --git a/libs/agent/src/lib/internals/stream-manager.bridge.spec.ts b/libs/agent/src/lib/internals/stream-manager.bridge.spec.ts index 1af26243d..744661f47 100644 --- a/libs/agent/src/lib/internals/stream-manager.bridge.spec.ts +++ b/libs/agent/src/lib/internals/stream-manager.bridge.spec.ts @@ -139,6 +139,88 @@ describe('createStreamManagerBridge', () => { } ); + it.each(['messages/partial', 'messages/complete'] as const)( + 'filters metadata from normalized SDK %s events (messages array path)', + async (type) => { + const transport = new MockAgentTransport(); + const subjects = makeSubjects(); + const destroy$ = new Subject(); + const bridge = createStreamManagerBridge({ + options: { apiUrl: '', assistantId: 'test', transport }, + subjects, + threadId$: of(null), + destroy$: destroy$.asObservable(), + }); + + bridge.submit({}); + // Simulate post-normalizeSdkEvent shape: messages array includes metadata + // This is what FetchStreamTransport produces in production + transport.emit([{ + type, + messages: [ + { id: 'ai-1', type: 'ai', content: 'Hello' 
}, + { langgraph_node: 'chatbot', langgraph_triggers: ['start:chatbot'] }, + ], + data: [ + { id: 'ai-1', type: 'ai', content: 'Hello' }, + { langgraph_node: 'chatbot', langgraph_triggers: ['start:chatbot'] }, + ], + } as any]); + transport.close(); + + await new Promise(r => setTimeout(r, 10)); + + // Only the real message should be in messages$, not the metadata + expect(subjects.messages$.value).toHaveLength(1); + expect(subjects.messages$.value[0]).toMatchObject({ id: 'ai-1', content: 'Hello' }); + destroy$.next(); + } + ); + + it('does not accumulate metadata across multiple messages/partial events', async () => { + const transport = new MockAgentTransport(); + const subjects = makeSubjects(); + const destroy$ = new Subject(); + const bridge = createStreamManagerBridge({ + options: { apiUrl: '', assistantId: 'test', transport }, + subjects, + threadId$: of(null), + destroy$: destroy$.asObservable(), + }); + + bridge.submit({}); + + // First values event — sets up the human message + transport.emit([{ + type: 'values', + values: { messages: [{ id: 'h-1', type: 'human', content: 'hi' }] }, + } as any]); + + // Simulate multiple messages/partial events (production SDK shape) + for (let i = 0; i < 5; i++) { + transport.emit([{ + type: 'messages/partial', + messages: [ + { id: 'ai-1', type: 'ai', content: 'Hello'.slice(0, i + 1) }, + { langgraph_node: 'chatbot' }, + ], + data: [ + { id: 'ai-1', type: 'ai', content: 'Hello'.slice(0, i + 1) }, + { langgraph_node: 'chatbot' }, + ], + } as any]); + } + + transport.close(); + await new Promise(r => setTimeout(r, 10)); + + // Should only have human + AI messages, no accumulated metadata + expect(subjects.messages$.value).toHaveLength(2); + expect(subjects.messages$.value[0]).toMatchObject({ id: 'h-1', content: 'hi' }); + expect(subjects.messages$.value[1]).toMatchObject({ id: 'ai-1', content: 'Hello' }); + destroy$.next(); + }); + it('ignores late events from the previous stream after threadId changes', async () => { 
const transport = new MockAgentTransport(); const subjects = makeSubjects(); diff --git a/libs/agent/src/lib/internals/stream-manager.bridge.ts b/libs/agent/src/lib/internals/stream-manager.bridge.ts index fbce3cc0b..9c43c6752 100644 --- a/libs/agent/src/lib/internals/stream-manager.bridge.ts +++ b/libs/agent/src/lib/internals/stream-manager.bridge.ts @@ -80,6 +80,14 @@ export function createStreamManagerBridge)?.['messages']; + if (Array.isArray(inputMessages) && inputMessages.length > 0) { + const existing = subjects.messages$.value; + subjects.messages$.next([...existing, ...inputMessages as BaseMessage[]]); + } + try { const iter = transport.stream( options.assistantId, @@ -255,7 +263,10 @@ function isMessagesEvent(type: StreamEvent['type']): boolean { function normalizeMessages(event: StreamEvent): unknown[] | null { const directMessages = event['messages']; if (Array.isArray(directMessages)) { - return directMessages; + // Filter out non-message metadata objects (e.g. { langgraph_node, langgraph_triggers }) + // that the LangGraph SDK includes alongside real messages in messages/* events. + const filtered = directMessages.filter(isMessageLike); + return filtered.length > 0 ? 
filtered : null; } const data = event['data']; diff --git a/libs/chat/src/lib/compositions/chat/chat.component.ts b/libs/chat/src/lib/compositions/chat/chat.component.ts index c5ecd584a..ad66cd45c 100644 --- a/libs/chat/src/lib/compositions/chat/chat.component.ts +++ b/libs/chat/src/lib/compositions/chat/chat.component.ts @@ -10,9 +10,7 @@ import { viewChild, ElementRef, ChangeDetectionStrategy, - inject, } from '@angular/core'; -import { DomSanitizer } from '@angular/platform-browser'; import type { AgentRef } from '@cacheplane/angular'; import type { ViewRegistry, RenderEvent } from '@cacheplane/render'; import type { StateStore } from '@json-render/core'; @@ -28,7 +26,8 @@ import { toRenderRegistry } from '@cacheplane/render'; import { createContentClassifier, type ContentClassifier } from '../../streaming/content-classifier'; import { messageContent } from '../shared/message-utils'; import { CHAT_THEME_STYLES } from '../../styles/chat-theme'; -import { CHAT_MARKDOWN_STYLES, renderMarkdown } from '../../styles/chat-markdown'; +import { CHAT_MARKDOWN_STYLES } from '../../styles/chat-markdown'; +import { ChatStreamingMdComponent } from '../../streaming/streaming-markdown.component'; import { A2uiSurfaceComponent } from '../../a2ui/surface.component'; import type { ChatRenderEvent } from './chat-render-event'; import { KeyValuePipe } from '@angular/common'; @@ -45,6 +44,7 @@ import { KeyValuePipe } from '@angular/common'; ChatInterruptComponent, ChatThreadListComponent, ChatGenerativeUiComponent, + ChatStreamingMdComponent, A2uiSurfaceComponent, KeyValuePipe, ], @@ -129,11 +129,12 @@ import { KeyValuePipe } from '@angular/common'; >A
@if (classified.markdown(); as md) { -
+ [content]="md" + [streaming]="ref().isLoading()" + /> } @if (classified.spec(); as spec) { @@ -214,7 +215,6 @@ import { KeyValuePipe } from '@angular/common'; `, }) export class ChatComponent { - private readonly sanitizer = inject(DomSanitizer); readonly ref = input.required>(); readonly views = input(undefined); @@ -251,7 +251,13 @@ export class ChatComponent { // - During streaming partials, only scroll if user is near bottom effect(() => { const count = this.messageCount(); - this.ref().isLoading(); // track + // Track last message content to trigger scroll during streaming partials + const msgs = this.ref().messages(); + const lastContent = msgs.length > 0 + ? (msgs[msgs.length - 1] as unknown as Record)['content'] + : undefined; + void lastContent; // consume the tracked value + const el = this.scrollContainer()?.nativeElement; if (!el) return; @@ -284,10 +290,6 @@ export class ChatComponent { this.classifiers.clear(); } - renderMd(content: string) { - return renderMarkdown(content, this.sanitizer); - } - onSpecEvent(event: RenderEvent, messageIndex: number): void { this.renderEvent.emit({ messageIndex, event }); } diff --git a/libs/chat/src/lib/primitives/chat-typing-indicator/chat-typing-indicator.component.ts b/libs/chat/src/lib/primitives/chat-typing-indicator/chat-typing-indicator.component.ts index b739c0e2d..0d53d1e1e 100644 --- a/libs/chat/src/lib/primitives/chat-typing-indicator/chat-typing-indicator.component.ts +++ b/libs/chat/src/lib/primitives/chat-typing-indicator/chat-typing-indicator.component.ts @@ -49,5 +49,14 @@ export function isTyping(ref: AgentRef): boolean { }) export class ChatTypingIndicatorComponent { readonly ref = input.required>(); - readonly visible = computed(() => this.ref().isLoading()); + readonly visible = computed(() => { + if (!this.ref().isLoading()) return false; + const msgs = this.ref().messages(); + if (msgs.length === 0) return true; + const last = msgs[msgs.length - 1]; + const type = typeof last._getType === 
'function' + ? last._getType() + : (last as unknown as Record)['type'] as string; + return type !== 'ai'; + }); } diff --git a/libs/chat/src/lib/streaming/content-classifier.ts b/libs/chat/src/lib/streaming/content-classifier.ts index b1d02eedf..1fc5cb4b0 100644 --- a/libs/chat/src/lib/streaming/content-classifier.ts +++ b/libs/chat/src/lib/streaming/content-classifier.ts @@ -1,5 +1,5 @@ // SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0 -import { signal, type Signal } from '@angular/core'; +import { signal, untracked, type Signal } from '@angular/core'; import type { Spec } from '@json-render/core'; import { createPartialJsonParser } from '@cacheplane/partial-json'; import { createParseTreeStore, type ElementAccumulationState, type ParseTreeStore } from './parse-tree-store'; @@ -98,82 +98,88 @@ export function createContentClassifier(): ContentClassifier { } function update(content: string): void { - const currentType = typeSignal(); + // Wrap in untracked() because this is called during template rendering + // (via classifyMessage in ChatComponent's AI message template). Angular's + // NG0600 forbids writing signals during change detection; untracked() + // opts out of the reactive graph for this imperative push-based update. 
+ untracked(() => { + const currentType = typeSignal(); + + if (currentType === 'undetermined') { + const detected = detectType(content); + if (detected === 'undetermined') return; + + typeSignal.set(detected); + + if (detected === 'markdown') { + markdownSignal.set(content); + processedLength = content.length; + } else if (detected === 'json-render') { + streamingSignal.set(true); + // Find where JSON starts (skip whitespace) + jsonStartIndex = 0; + for (let i = 0; i < content.length; i++) { + if (content[i] !== ' ' && content[i] !== '\t' && content[i] !== '\n' && content[i] !== '\r') { + jsonStartIndex = i; + break; + } + } + const jsonContent = content.slice(jsonStartIndex); + try { + initJsonStore(jsonContent); + } catch (err) { + errorsSignal.update(prev => [...prev, err instanceof Error ? err.message : String(err)]); + } + processedLength = content.length; + } else if (detected === 'a2ui') { + streamingSignal.set(true); + a2uiParser = createA2uiMessageParser(); + a2uiStore = createA2uiSurfaceStore(); + jsonStartIndex = content.indexOf(A2UI_PREFIX) + A2UI_PREFIX.length; + const a2uiContent = content.slice(jsonStartIndex); + if (a2uiContent.length > 0) { + try { + const msgs = a2uiParser.push(a2uiContent); + for (const msg of msgs) a2uiStore.apply(msg); + a2uiSurfacesSignal.set(a2uiStore.surfaces()); + } catch (err) { + errorsSignal.update(prev => [...prev, err instanceof Error ? 
err.message : String(err)]); + } + } + processedLength = content.length; + } + return; + } - if (currentType === 'undetermined') { - const detected = detectType(content); - if (detected === 'undetermined') return; + // Compute delta + const delta = content.slice(processedLength); + processedLength = content.length; - typeSignal.set(detected); + if (delta.length === 0) return; - if (detected === 'markdown') { + if (currentType === 'markdown' || currentType === 'mixed') { markdownSignal.set(content); - processedLength = content.length; - } else if (detected === 'json-render') { - streamingSignal.set(true); - // Find where JSON starts (skip whitespace) - jsonStartIndex = 0; - for (let i = 0; i < content.length; i++) { - if (content[i] !== ' ' && content[i] !== '\t' && content[i] !== '\n' && content[i] !== '\r') { - jsonStartIndex = i; - break; + } else if (currentType === 'json-render') { + if (store) { + try { + store.push(delta); + syncJsonSignals(); + } catch (err) { + errorsSignal.update(prev => [...prev, err instanceof Error ? err.message : String(err)]); } } - const jsonContent = content.slice(jsonStartIndex); - try { - initJsonStore(jsonContent); - } catch (err) { - errorsSignal.update(prev => [...prev, err instanceof Error ? err.message : String(err)]); - } - processedLength = content.length; - } else if (detected === 'a2ui') { - streamingSignal.set(true); - a2uiParser = createA2uiMessageParser(); - a2uiStore = createA2uiSurfaceStore(); - jsonStartIndex = content.indexOf(A2UI_PREFIX) + A2UI_PREFIX.length; - const a2uiContent = content.slice(jsonStartIndex); - if (a2uiContent.length > 0) { + } else if (currentType === 'a2ui') { + if (a2uiParser && a2uiStore) { try { - const msgs = a2uiParser.push(a2uiContent); + const msgs = a2uiParser.push(delta); for (const msg of msgs) a2uiStore.apply(msg); a2uiSurfacesSignal.set(a2uiStore.surfaces()); } catch (err) { errorsSignal.update(prev => [...prev, err instanceof Error ? 
err.message : String(err)]); } } - processedLength = content.length; } - return; - } - - // Compute delta - const delta = content.slice(processedLength); - processedLength = content.length; - - if (delta.length === 0) return; - - if (currentType === 'markdown' || currentType === 'mixed') { - markdownSignal.set(content); - } else if (currentType === 'json-render') { - if (store) { - try { - store.push(delta); - syncJsonSignals(); - } catch (err) { - errorsSignal.update(prev => [...prev, err instanceof Error ? err.message : String(err)]); - } - } - } else if (currentType === 'a2ui') { - if (a2uiParser && a2uiStore) { - try { - const msgs = a2uiParser.push(delta); - for (const msg of msgs) a2uiStore.apply(msg); - a2uiSurfacesSignal.set(a2uiStore.surfaces()); - } catch (err) { - errorsSignal.update(prev => [...prev, err instanceof Error ? err.message : String(err)]); - } - } - } + }); } function dispose(): void { diff --git a/libs/chat/src/lib/streaming/streaming-markdown.component.ts b/libs/chat/src/lib/streaming/streaming-markdown.component.ts new file mode 100644 index 000000000..b775de388 --- /dev/null +++ b/libs/chat/src/lib/streaming/streaming-markdown.component.ts @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0 +import { + Component, + input, + effect, + ElementRef, + inject, + ChangeDetectionStrategy, + untracked, +} from '@angular/core'; +import { DomSanitizer } from '@angular/platform-browser'; +import { createStreamingMarkdownRenderer, type StreamingMarkdownRenderer } from './streaming-markdown'; +import { renderMarkdownToString } from '../styles/chat-markdown'; + +/** + * Renders markdown content using a streaming append-only DOM renderer + * during active streaming, then switches to a full marked.parse() render + * once the content stabilises (no new content for a frame). + * + * This eliminates the jank caused by full innerHTML replacement on every + * SSE token — the streaming renderer only appends new DOM nodes. 
+ */ +@Component({ + selector: 'chat-streaming-md', + standalone: true, + changeDetection: ChangeDetectionStrategy.OnPush, + template: '', + styles: `:host { display: block; }`, +}) +export class ChatStreamingMdComponent { + private readonly el = inject(ElementRef).nativeElement as HTMLElement; + private readonly sanitizer = inject(DomSanitizer); + + /** Full markdown content (updated on every partial) */ + readonly content = input.required(); + /** Whether the parent stream is still loading */ + readonly streaming = input(false); + + private renderer: StreamingMarkdownRenderer | null = null; + private lastContent = ''; + private finalised = false; + + constructor() { + effect(() => { + const content = this.content(); + const isStreaming = this.streaming(); + + untracked(() => this.render(content, isStreaming)); + }); + } + + private render(content: string, isStreaming: boolean): void { + if (!content) return; + + if (isStreaming) { + // Streaming mode: use append-only renderer with deltas + if (!this.renderer) { + this.renderer = createStreamingMarkdownRenderer(); + this.el.textContent = ''; + this.el.appendChild(this.renderer.container); + this.finalised = false; + } + + // Compute delta from last known content + const delta = content.slice(this.lastContent.length); + this.lastContent = content; + + if (delta) { + this.renderer.push(delta); + } + } else { + // Stream complete: do a single high-quality marked.parse() render + if (!this.finalised || content !== this.lastContent) { + this.lastContent = content; + this.finalised = true; + this.renderer = null; + + this.el.innerHTML = renderMarkdownToString(content, this.sanitizer); + } + } + } +} diff --git a/libs/chat/src/lib/streaming/streaming-markdown.spec.ts b/libs/chat/src/lib/streaming/streaming-markdown.spec.ts new file mode 100644 index 000000000..4ef62a007 --- /dev/null +++ b/libs/chat/src/lib/streaming/streaming-markdown.spec.ts @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0 
import { describe, it, expect, beforeEach } from 'vitest';
import {
  createStreamingMarkdownRenderer,
  type StreamingMarkdownRenderer,
} from './streaming-markdown';

describe('StreamingMarkdownRenderer', () => {
  let renderer: StreamingMarkdownRenderer;

  // Fresh renderer per test: the renderer accumulates DOM state.
  beforeEach(() => {
    renderer = createStreamingMarkdownRenderer();
  });

  describe('container', () => {
    it('should have class chat-md', () => {
      expect(renderer.container.className).toBe('chat-md');
    });

    it('should be a div element', () => {
      expect(renderer.container.tagName).toBe('DIV');
    });
  });

  describe('plain text renders as paragraph', () => {
    it('should wrap plain text in a <p> tag', () => {
      renderer.push('Hello world');
      renderer.finish();
      const p = renderer.container.querySelector('p');
      expect(p).not.toBeNull();
      expect(p!.textContent).toBe('Hello world');
    });

    it('should create separate paragraphs for text separated by blank lines', () => {
      renderer.push('First paragraph\n\nSecond paragraph');
      renderer.finish();
      const paragraphs = renderer.container.querySelectorAll('p');
      expect(paragraphs).toHaveLength(2);
      expect(paragraphs[0].textContent).toBe('First paragraph');
      expect(paragraphs[1].textContent).toBe('Second paragraph');
    });

    it('should join consecutive non-blank lines in the same paragraph', () => {
      renderer.push('Line one\nLine two');
      renderer.finish();
      const paragraphs = renderer.container.querySelectorAll('p');
      expect(paragraphs).toHaveLength(1);
      expect(paragraphs[0].textContent).toBe('Line one Line two');
    });
  });

  describe('bold and italic inline formatting', () => {
    it('should render **text** as <strong>', () => {
      renderer.push('This is **bold** text');
      renderer.finish();
      const strong = renderer.container.querySelector('strong');
      expect(strong).not.toBeNull();
      expect(strong!.textContent).toBe('bold');
    });

    it('should render *text* as <em>', () => {
      renderer.push('This is *italic* text');
      renderer.finish();
      const em = renderer.container.querySelector('em');
      expect(em).not.toBeNull();
      expect(em!.textContent).toBe('italic');
    });

    it('should handle bold and italic in the same line', () => {
      renderer.push('**bold** and *italic*');
      renderer.finish();
      expect(renderer.container.querySelector('strong')!.textContent).toBe('bold');
      expect(renderer.container.querySelector('em')!.textContent).toBe('italic');
    });

    it('should handle nested bold inside text', () => {
      renderer.push('Start **middle** end');
      renderer.finish();
      const p = renderer.container.querySelector('p')!;
      // Assert the serialized markup, not just textContent, so we know
      // the <strong> element actually wraps "middle".
      expect(p.innerHTML).toBe('Start <strong>middle</strong> end');
    });
  });

  describe('headers (h1-h4)', () => {
    it('should render # as h1', () => {
      renderer.push('# Heading 1');
      renderer.finish();
      const h1 = renderer.container.querySelector('h1');
      expect(h1).not.toBeNull();
      expect(h1!.textContent).toBe('Heading 1');
    });

    it('should render ## as h2', () => {
      renderer.push('## Heading 2');
      renderer.finish();
      const h2 = renderer.container.querySelector('h2');
      expect(h2).not.toBeNull();
      expect(h2!.textContent).toBe('Heading 2');
    });

    it('should render ### as h3', () => {
      renderer.push('### Heading 3');
      renderer.finish();
      const h3 = renderer.container.querySelector('h3');
      expect(h3).not.toBeNull();
      expect(h3!.textContent).toBe('Heading 3');
    });

    it('should render #### as h4', () => {
      renderer.push('#### Heading 4');
      renderer.finish();
      const h4 = renderer.container.querySelector('h4');
      expect(h4).not.toBeNull();
      expect(h4!.textContent).toBe('Heading 4');
    });

    it('should support inline formatting in headers', () => {
      renderer.push('## A **bold** heading');
      renderer.finish();
      const h2 = renderer.container.querySelector('h2')!;
      expect(h2.querySelector('strong')!.textContent).toBe('bold');
    });
  });

  describe('unordered and ordered lists', () => {
    it('should render - items as