React Integration
Build real-time chat UIs with React using Vercel AI SDK's useChat hook. This guide shows how to display streaming text, tool invocations, and thinking content from Helix Agents.
Prerequisites
bash
npm install ai @ai-sdk/react
Basic Chat Component
tsx
import { useChat } from '@ai-sdk/react';
function ChatComponent() {
const { messages, input, handleInputChange, handleSubmit, isLoading, error } = useChat({
api: '/api/chat',
});
return (
<div className="chat-container">
{/* Message List */}
<div className="messages">
{messages.map((message) => (
<div key={message.id} className={`message ${message.role}`}>
<MessageContent message={message} />
</div>
))}
</div>
{/* Error Display */}
{error && <div className="error">Error: {error.message}</div>}
{/* Input Form */}
<form onSubmit={handleSubmit} className="input-form">
<input
value={input}
onChange={handleInputChange}
placeholder="Type a message..."
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
{isLoading ? 'Sending...' : 'Send'}
</button>
</form>
</div>
);
}
Rendering Message Parts (AI SDK v6)
In AI SDK v6, messages use parts as the source of truth:
tsx
function MessageContent({ message }: { message: UIMessage }) {
return (
<div className="message-content">
{message.parts.map((part, index) => (
<MessagePart key={index} part={part} />
))}
</div>
);
}
function MessagePart({ part }: { part: UIMessagePart }) {
// Text content
if (part.type === 'text') {
return <p className="text-part">{part.text}</p>;
}
// Reasoning/thinking content
if (part.type === 'reasoning') {
return (
<div className="reasoning-part">
<span className="label">Thinking:</span>
<p>{part.text}</p>
</div>
);
}
// Tool invocation (type is `tool-${toolName}`)
if (part.type.startsWith('tool-')) {
return <ToolInvocation part={part} />;
}
return null;
}
Using Type Guards
For type-safe filtering and checks, use the provided type guards:
tsx
import {
isUIToolInvocationPart,
hasPendingTools,
hasErroredTools,
getToolParts,
} from '@helix-agents/ai-sdk';
function MessagePart({ part }: { part: UIMessagePart }) {
// Use type guards for safe discrimination
if (isUIToolInvocationPart(part)) {
return <ToolInvocation part={part} />;
}
// part is narrowed to UITextPart | UIReasoningPart
// ...
}
function Message({ message }: { message: UIMessage }) {
// Check for loading state
const isLoading = hasPendingTools(message);
// Check for errors
const hasErrors = hasErroredTools(message);
// Get all tool parts
const tools = getToolParts(message);
return (
<div className={`message ${hasErrors ? 'error' : ''}`}>
{message.parts.map((part, i) => (
<MessagePart key={i} part={part} />
))}
{isLoading && <LoadingSpinner />}
</div>
);
}
Displaying Tool Invocations
tsx
function ToolInvocation({ part }: { part: UIMessageToolInvocationPart }) {
// Extract tool name from type (e.g., 'tool-search' → 'search')
const toolName = part.type.replace('tool-', '');
return (
<div className={`tool-invocation ${part.state}`}>
<div className="tool-header">
<ToolIcon name={toolName} />
<span className="tool-name">{toolName}</span>
<ToolStatus state={part.state} />
</div>
{/* Show input */}
<div className="tool-input">
<span className="label">Input:</span>
<pre>{JSON.stringify(part.input, null, 2)}</pre>
</div>
{/* Show output when available */}
{part.state === 'output-available' && part.output && (
<div className="tool-output">
<span className="label">Output:</span>
<pre>{JSON.stringify(part.output, null, 2)}</pre>
</div>
)}
{/* Show error if failed */}
{part.state === 'output-error' && part.errorText && (
<div className="tool-error">
<span className="label">Error:</span>
<span>{part.errorText}</span>
</div>
)}
</div>
);
}
function ToolStatus({ state }: { state: ToolInvocationState }) {
switch (state) {
case 'input-streaming':
return <span className="status streaming">Streaming...</span>;
case 'input-available':
return <span className="status pending">Running...</span>;
case 'output-available':
return <span className="status success">Completed</span>;
case 'output-error':
return <span className="status error">Failed</span>;
}
}
Streaming Indicators
Show streaming state during generation:
tsx
function ChatComponent() {
const { messages, isLoading, status } = useChat({
api: '/api/chat',
});
return (
<div>
{messages.map((message) => (
<Message key={message.id} message={message} />
))}
{/* Streaming indicator */}
{isLoading && (
<div className="streaming-indicator">
<LoadingDots />
<span>Agent is thinking...</span>
</div>
)}
</div>
);
}
Loading Existing Conversations
Restore conversation state with initialMessages:
tsx
function ChatPage({ sessionId }: { sessionId: string }) {
const [initialMessages, setInitialMessages] = useState<UIMessage[]>([]);
const [loading, setLoading] = useState(true);
// Load messages on mount
useEffect(() => {
async function loadMessages() {
const res = await fetch(`/api/messages/${sessionId}`);
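      // A production version should also check res.ok and handle fetch errors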
const { messages } = await res.json();
setInitialMessages(messages);
setLoading(false);
}
loadMessages();
}, [sessionId]);
const { messages, input, handleInputChange, handleSubmit } = useChat({
api: '/api/chat',
initialMessages,
});
if (loading) {
return <LoadingSpinner />;
}
return (
<div>
{messages.map((msg) => (
<Message key={msg.id} message={msg} />
))}
{/* ... input form ... */}
</div>
);
}
Using Store Utilities
For direct access to stored messages, use the store utilities:
tsx
import { loadAllUIMessages } from '@helix-agents/ai-sdk';
function ChatPage({ sessionId }: { sessionId: string }) {
const [initialMessages, setInitialMessages] = useState<UIMessage[]>([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
// Load directly from state store
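    // stateStore is assumed to be your configured Helix state store instance, available in this scope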
loadAllUIMessages(stateStore, sessionId)
.then((messages) => {
setInitialMessages(messages);
setLoading(false);
});
}, [sessionId]);
const { messages } = useChat({
api: '/api/chat',
initialMessages,
});
if (loading) return <LoadingSpinner />;
return (
<div>
{messages.map((msg) => (
<Message key={msg.id} message={msg} />
))}
</div>
);
}
Multi-Turn Conversations
Enable conversation continuation where each message builds on the previous exchange:
tsx
function MultiTurnChat() {
// Use a stable sessionId for conversation continuity
const [sessionId] = useState<string>(() => crypto.randomUUID());
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
api: '/api/chat',
// Pass sessionId for all messages in the conversation
body: { sessionId },
});
// Start a new conversation with a new sessionId
const startNewConversation = () => {
window.location.reload(); // Or manage sessionId state to generate a new one
};
return (
<div>
<button onClick={startNewConversation}>New Conversation</button>
{messages.map((msg) => (
<Message key={msg.id} message={msg} />
))}
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={handleInputChange}
placeholder="Type a message..."
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
Send
</button>
</form>
</div>
);
}
How It Works
- Generate a sessionId on component mount (or from your URL/state)
- Pass the sessionId in every request body
- The backend automatically continues the conversation within the same session
- Each turn creates a new run within the session (for debugging, billing, tracing)
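As an alternative to the window.location.reload() call above, you can keep the sessionId in React state and remount the chat when it changes. A minimal sketch (the key-based remount pattern is a suggestion, not something required by Helix Agents):
tsx
function MultiTurnChatWithReset() {
  // Regenerating the sessionId starts a fresh conversation
  const [sessionId, setSessionId] = useState(() => crypto.randomUUID());
  return (
    <div>
      <button onClick={() => setSessionId(crypto.randomUUID())}>New Conversation</button>
      {/* Changing the key unmounts and remounts the chat, so useChat starts
          with an empty message list bound to the new sessionId */}
      <ChatForSession key={sessionId} sessionId={sessionId} />
    </div>
  );
}
function ChatForSession({ sessionId }: { sessionId: string }) {
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: '/api/chat',
    body: { sessionId },
  });
  return (
    <div>
      {messages.map((msg) => (
        <Message key={msg.id} message={msg} />
      ))}
      <form onSubmit={handleSubmit}>
        <input value={input} onChange={handleInputChange} placeholder="Type a message..." />
        <button type="submit">Send</button>
      </form>
    </div>
  );
}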
Request Body
typescript
// All messages include sessionId
{ message: 'Hello', sessionId: 'session-123' }
// Subsequent messages - same sessionId
{ message: 'Tell me more', sessionId: 'session-123' }
// Or with your own message history
{ message: 'Tell me more', messages: myStoredMessages }
Using External Message History
When you manage your own conversation storage:
typescript
const { messages, handleSubmit } = useChat({
api: '/api/chat',
body: {
// Pass your stored messages from your own database
messages: myStoredMessages.map((m) => ({
role: m.role,
content: m.content,
})),
},
});
Behavior Table
| Input | Messages Source | State Source |
|---|---|---|
| message only (new session) | Empty (fresh) | Empty (fresh) |
| message + sessionId (existing) | From session | From session |
| message + messages | From messages | Empty (fresh) |
| message + state | Empty (fresh) | From state |
| message + sessionId + messages | From messages (override) | From session |
| message + sessionId + state | From session | From state (override) |
| All four | From messages (override) | From state (override) |
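For example, based on the table above, a request that supplies both a sessionId and an explicit messages array takes its history from the provided messages while still restoring state from the session (a sketch of the request body; the field names follow the examples above):
typescript
// Continue session-123, but override its stored history with your own messages.
// Per the table: history comes from `messages` (override), state comes from the session.
{ message: 'Summarize our discussion', sessionId: 'session-123', messages: myStoredMessages }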
Custom Data Events
Handle custom events from your agent:
tsx
function ChatComponent() {
const [customData, setCustomData] = useState<Record<string, unknown>>({});
  // Custom data-* events are exposed on the data array returned by useChat
  // (alternatively, onResponse gives access to the raw response stream if you
  // need to process the SSE events yourself)
  const { messages, data } = useChat({
    api: '/api/chat',
  });
useEffect(() => {
if (data) {
// Process data events
for (const item of data) {
if (item.type === 'data-output') {
setCustomData(item.data);
}
}
}
}, [data]);
return (
<div>
{messages.map((msg) => (
<Message key={msg.id} message={msg} />
))}
{/* Display final output */}
{customData.output && <OutputDisplay output={customData.output} />}
</div>
);
}
Error Handling
Handle errors gracefully:
tsx
function ChatComponent() {
const { messages, error, reload, stop } = useChat({
api: '/api/chat',
onError: (error) => {
console.error('Chat error:', error);
// Show toast notification, etc.
},
});
return (
<div>
{messages.map((msg) => (
<Message key={msg.id} message={msg} />
))}
{error && (
<div className="error-banner">
<p>Something went wrong: {error.message}</p>
<div className="error-actions">
<button onClick={() => reload()}>Retry</button>
<button onClick={() => stop()}>Stop</button>
</div>
</div>
)}
</div>
);
}
Thinking/Reasoning Display
Show Claude's thinking or OpenAI's reasoning:
tsx
function ReasoningDisplay({ text }: { text: string }) {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="reasoning-block">
<button className="reasoning-toggle" onClick={() => setIsExpanded(!isExpanded)}>
<ThinkingIcon />
<span>View reasoning</span>
<ChevronIcon direction={isExpanded ? 'up' : 'down'} />
</button>
{isExpanded && (
<div className="reasoning-content">
<pre>{text}</pre>
</div>
)}
</div>
);
}
function MessagePart({ part }: { part: UIMessagePart }) {
if (part.type === 'reasoning') {
return <ReasoningDisplay text={part.text} />;
}
// ... other parts
}
Sub-Agent Display
Handle sub-agent events:
tsx
function SubAgentDisplay({
type,
sessionId,
status,
}: {
type: string;
sessionId: string;
status: 'running' | 'completed' | 'failed';
}) {
return (
<div className={`subagent ${status}`}>
<div className="subagent-header">
<AgentIcon type={type} />
<span className="subagent-type">{type}</span>
<span className="subagent-status">
{status === 'running' ? (
<Spinner />
) : status === 'completed' ? (
<CheckIcon />
) : (
<ErrorIcon />
)}
</span>
</div>
<span className="subagent-id">Session: {sessionId}</span>
</div>
);
}
Complete Example
tsx
import { useState } from 'react';
import { useChat } from '@ai-sdk/react';
import type { UIMessage, UIMessagePart, UIMessageToolInvocationPart } from '@helix-agents/ai-sdk';
function Chat() {
const { messages, input, handleInputChange, handleSubmit, isLoading, error, reload, stop } =
useChat({
api: '/api/chat',
});
return (
<div className="chat">
{/* Messages */}
<div className="messages">
{messages.map((message) => (
<div key={message.id} className={`message ${message.role}`}>
<Avatar role={message.role} />
<div className="message-body">
{message.parts.map((part, i) => (
<MessagePart key={i} part={part} />
))}
</div>
</div>
))}
{/* Streaming indicator */}
{isLoading && <StreamingIndicator />}
</div>
{/* Error */}
{error && <ErrorBanner error={error} onRetry={reload} onStop={stop} />}
{/* Input */}
<form onSubmit={handleSubmit} className="input-form">
<textarea
value={input}
onChange={handleInputChange}
placeholder="Message the agent..."
disabled={isLoading}
rows={1}
/>
<button type="submit" disabled={isLoading || !input.trim()}>
<SendIcon />
</button>
</form>
</div>
);
}
function MessagePart({ part }: { part: UIMessagePart }) {
if (part.type === 'text') {
return <TextPart text={part.text} />;
}
if (part.type === 'reasoning') {
return <ReasoningPart text={part.text} />;
}
if (part.type.startsWith('tool-')) {
return <ToolPart part={part} />;
}
return null;
}
function TextPart({ text }: { text: string }) {
// Use markdown rendering if needed
return <div className="text-part">{text}</div>;
}
function ReasoningPart({ text }: { text: string }) {
const [expanded, setExpanded] = useState(false);
return (
<div className="reasoning-part">
<button onClick={() => setExpanded(!expanded)}>{expanded ? 'Hide' : 'Show'} thinking</button>
{expanded && <pre>{text}</pre>}
</div>
);
}
function ToolPart({ part }: { part: UIMessageToolInvocationPart }) {
const toolName = part.type.replace('tool-', '');
return (
<div className={`tool-part ${part.state}`}>
<div className="tool-header">
<strong>{toolName}</strong>
<StatusBadge state={part.state} />
</div>
<details>
<summary>Input</summary>
<pre>{JSON.stringify(part.input, null, 2)}</pre>
</details>
{part.state === 'output-available' && (
<details open>
<summary>Output</summary>
<pre>{JSON.stringify(part.output, null, 2)}</pre>
</details>
)}
{part.state === 'output-error' && <div className="error">{part.errorText}</div>}
</div>
);
}
export default Chat;
Styling Tips
css
/* Basic chat styling */
.chat {
display: flex;
flex-direction: column;
height: 100vh;
}
.messages {
flex: 1;
overflow-y: auto;
padding: 1rem;
}
.message {
display: flex;
gap: 0.75rem;
margin-bottom: 1rem;
}
.message.user {
flex-direction: row-reverse;
}
.message.assistant .message-body {
background: #f3f4f6;
border-radius: 1rem;
padding: 0.75rem 1rem;
}
/* Tool invocation styling */
.tool-part {
border: 1px solid #e5e7eb;
border-radius: 0.5rem;
padding: 0.75rem;
margin: 0.5rem 0;
}
.tool-part.input-available {
border-color: #fbbf24;
background: #fffbeb;
}
.tool-part.output-available {
border-color: #22c55e;
background: #f0fdf4;
}
.tool-part.output-error {
border-color: #ef4444;
background: #fef2f2;
}
/* Input form */
.input-form {
display: flex;
gap: 0.5rem;
padding: 1rem;
border-top: 1px solid #e5e7eb;
}
.input-form textarea {
flex: 1;
resize: none;
border: 1px solid #e5e7eb;
border-radius: 0.5rem;
padding: 0.75rem;
}
Next Steps
- Framework Examples - Backend setup with Express, Hono
- AI SDK Package - Deep dive into the package
- Streaming Guide - Understanding stream chunks