# Quick Start
The fastest way to get started with assistant-ui.
## Initialize assistant-ui
Create a new project:
npx assistant-ui@latest create
Or choose a template:
# Minimal starter
npx assistant-ui@latest create -t minimal
# Assistant Cloud - with persistence and thread management
npx assistant-ui@latest create -t cloud
# Assistant Cloud + Clerk authentication
npx assistant-ui@latest create -t cloud-clerk
# LangGraph starter template
npx assistant-ui@latest create -t langgraph
# MCP starter template
npx assistant-ui@latest create -t mcp
Add to an existing project:
npx assistant-ui@latest init
## Add API key
Create a `.env` file with your OpenAI API key:
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
## Start the app
npm run dev
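Then open http://localhost:3000 (the Next.js default) to chat with your assistant. If your project uses a different package manager, run the equivalent dev script instead:

```bash
pnpm dev   # or: yarn dev / bun dev
```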
## Manual Setup
If you prefer not to use the CLI, you can install components manually.
### Add assistant-ui
<InstallCommand shadcn={["thread", "thread-list"]} manualSetupInstructions />
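If you would rather run the underlying shadcn command yourself, the components are published on the assistant-ui registry. The URLs below follow the same pattern as the `assistant-modal` component used later in this guide; treat the exact paths as an assumption and prefer the CLI output if they differ:

```bash
npx shadcn@latest add "https://r.assistant-ui.com/thread.json" "https://r.assistant-ui.com/thread-list.json"
```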
### Set Up the Backend Endpoint
Install the AI SDK and your provider's package:
<Tabs groupId="provider" items={["OpenAI", "Anthropic", "Azure", "AWS", "Gemini", "GCP", "Groq", "Fireworks", "Cohere", "Ollama", "Chrome AI"]}>
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/openai"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/anthropic"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/azure"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/amazon-bedrock"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/google"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/google-vertex"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/groq"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/fireworks"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "@ai-sdk/cohere"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "ollama-ai-provider-v2"]} />
<InstallCommand npm={["ai", "@assistant-ui/react-ai-sdk", "chrome-ai"]} />
Add an API endpoint at `app/api/chat/route.ts` (this matches the `/api/chat` path used by the frontend below):
<Tabs groupId="provider" items={["OpenAI", "Anthropic", "Azure", "AWS", "Gemini", "GCP", "Groq", "Fireworks", "Cohere", "Ollama", "Chrome AI"]}>
import { openai } from "@ai-sdk/openai";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: openai("gpt-4o-mini"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { anthropic } from "@ai-sdk/anthropic";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: anthropic("claude-sonnet-4-6"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { azure } from "@ai-sdk/azure";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: azure("your-deployment-name"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { bedrock } from "@ai-sdk/amazon-bedrock";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: bedrock("anthropic.claude-sonnet-4-6-v1:0"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { google } from "@ai-sdk/google";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: google("gemini-2.0-flash"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { vertex } from "@ai-sdk/google-vertex";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: vertex("gemini-2.0-flash"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { groq } from "@ai-sdk/groq";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: groq("llama-3.3-70b-versatile"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { fireworks } from "@ai-sdk/fireworks";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: fireworks("accounts/fireworks/models/llama-v3p3-70b-instruct"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { cohere } from "@ai-sdk/cohere";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: cohere("command-r-plus"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { ollama } from "ollama-ai-provider-v2";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: ollama("llama3"),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
import { chromeai } from "chrome-ai";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages, system, tools } = await req.json();

  const result = streamText({
    model: chromeai(),
    system,
    messages: await convertToModelMessages(messages),
    tools: frontendTools(tools),
  });

  return result.toUIMessageStreamResponse();
}
Define your provider's environment variables in the `.env` file:
<Tabs groupId="provider" items={["OpenAI", "Anthropic", "Azure", "AWS", "Gemini", "GCP", "Groq", "Fireworks", "Cohere", "Ollama", "Chrome AI"]}>
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
ANTHROPIC_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
AZURE_RESOURCE_NAME="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
AZURE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
AWS_ACCESS_KEY_ID="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
AWS_SECRET_ACCESS_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
AWS_REGION="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GOOGLE_GENERATIVE_AI_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GOOGLE_VERTEX_PROJECT="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GOOGLE_VERTEX_LOCATION="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GOOGLE_APPLICATION_CREDENTIALS="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GROQ_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
FIREWORKS_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
COHERE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# Ollama runs locally; no API key is required.
# Chrome AI runs in the browser; no API key is required.
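With the keys in place, you can sanity-check the endpoint before wiring up the UI. The request body below assumes the AI SDK UIMessage shape that the frontend runtime sends (a `messages` array of role/parts objects, plus optional `system` and `tools`); adjust the port if your dev server runs elsewhere:

```bash
curl -N http://localhost:3000/api/chat \
  -H "Content-Type: application/json" \
  -d '{"system":"You are a helpful assistant.","tools":{},"messages":[{"id":"1","role":"user","parts":[{"type":"text","text":"Hello!"}]}]}'
```

A streamed response printed to the terminal means the route and provider credentials are working.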
If you aren't using Next.js, you can also deploy this endpoint to Cloudflare Workers or any other serverless platform.
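Because the handlers above only use web-standard `Request`/`Response` APIs, porting them is mostly a matter of changing the entry point. A minimal sketch for a Cloudflare Worker, assuming the OpenAI variant and an `OPENAI_API_KEY` secret bound to the Worker (file name and bindings are illustrative):

```ts
// worker.ts: hypothetical Worker entry point reusing the same streamText setup
import { createOpenAI } from "@ai-sdk/openai";
import { frontendTools } from "@assistant-ui/react-ai-sdk";
import { convertToModelMessages, streamText, type UIMessage } from "ai";

export default {
  async fetch(req: Request, env: { OPENAI_API_KEY: string }): Promise<Response> {
    if (req.method !== "POST") {
      return new Response("Method Not Allowed", { status: 405 });
    }

    // Same request body shape the Next.js route handler above receives.
    const { messages, system, tools } = (await req.json()) as {
      messages: UIMessage[];
      system?: string;
      tools?: any; // serialized frontend tool definitions forwarded by the client
    };

    // Workers don't read process.env, so pass the bound secret explicitly.
    const openai = createOpenAI({ apiKey: env.OPENAI_API_KEY });

    const result = streamText({
      model: openai("gpt-4o-mini"),
      system,
      messages: convertToModelMessages(messages),
      tools: frontendTools(tools),
    });

    return result.toUIMessageStreamResponse();
  },
};
```

On the frontend, point the `api` option of `AssistantChatTransport` at the deployed Worker URL instead of `/api/chat`.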
### Use it in your app
<Tabs items={["Thread", "AssistantModal"]}>
"use client";

import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";
import { ThreadList } from "@/components/assistant-ui/thread-list";
import { Thread } from "@/components/assistant-ui/thread";

export default function MyApp() {
  const runtime = useChatRuntime({
    transport: new AssistantChatTransport({
      api: "/api/chat",
    }),
  });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      <div>
        <ThreadList />
        <Thread />
      </div>
    </AssistantRuntimeProvider>
  );
}
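As written, the wrapper `<div>` has no size, so the thread may collapse to zero height. One possible layout, assuming Tailwind CSS is available (the shadcn-based assistant-ui components already rely on it); the class names are illustrative, not required:

```tsx
return (
  <AssistantRuntimeProvider runtime={runtime}>
    {/* thread list sidebar on the left, active conversation on the right */}
    <div className="grid h-dvh grid-cols-[250px_1fr] gap-4 p-4">
      <ThreadList />
      <Thread />
    </div>
  </AssistantRuntimeProvider>
);
```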
// run `npx shadcn@latest add https://r.assistant-ui.com/assistant-modal.json`
"use client";

import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";
import { AssistantModal } from "@/components/assistant-ui/assistant-modal";

export default function MyApp() {
  const runtime = useChatRuntime({
    transport: new AssistantChatTransport({
      api: "/api/chat",
    }),
  });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      <AssistantModal />
    </AssistantRuntimeProvider>
  );
}