
Update Vercel AI SDK to v4 #65


Open · wants to merge 5 commits into main
4 changes: 3 additions & 1 deletion .gitignore
@@ -40,4 +40,6 @@ next-env.d.ts
!.yarn/releases
!.yarn/sdks
!.yarn/versions
.env
.env

.vscode
Member

we don't want to gitignore this

Suggested change
.vscode

43 changes: 13 additions & 30 deletions app/api/chat/agents/route.ts
@@ -1,5 +1,5 @@
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { Message as VercelChatMessage, LangChainAdapter } from "ai";

import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
@@ -26,23 +26,28 @@ const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
};

const convertLangChainMessageToVercelMessage = (message: BaseMessage) => {
if (message._getType() === "human") {
if (message.getType() === "human") {
return { content: message.content, role: "user" };
} else if (message._getType() === "ai") {
} else if (message.getType() === "ai") {
return {
content: message.content,
role: "assistant",
tool_calls: (message as AIMessage).tool_calls,
parts: (message as AIMessage).tool_calls,
};
} else if (message.getType() === "tool") {
return {
content: message.content,
role: "system",
Member

I think this should be role: tool?

Suggested change
role: "system",
role: "tool",

Author

Vercel's Message type bounds role to 'system' | 'user' | 'assistant' | 'data', so I thought system was the closest to tool

Member

Hmm, weird. I'd need to read the docs then, but I don't think system would be the right role for this
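
For readers following this thread, here is a minimal sketch of the constraint under discussion, assuming the ai v4 UI message shape described above (the interface below is illustrative, not copied from the package):

// Sketch only: the role union the comments refer to. The v4 UI message has no
// dedicated "tool" role; tool activity travels as structured parts on
// assistant messages, so the conversion above has to approximate LangChain
// tool messages with one of these four roles.
type UIMessageRole = "system" | "user" | "assistant" | "data";

interface UIMessageSketch {
  id: string;
  role: UIMessageRole;
  content: string;
  parts?: Array<{ type: string; [key: string]: unknown }>; // illustrative shape
}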

};
} else {
return { content: message.content, role: message._getType() };
return { content: message.content, role: message.getType() };
}
};

const AGENT_SYSTEM_TEMPLATE = `You are a talking parrot named Polly. All final responses must be how a talking parrot would respond. Squawk often!`;

/**
* This handler initializes and calls an tool caling ReAct agent.
* This handler initializes and calls an tool calling ReAct agent.
* See the docs for more information:
*
* https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
@@ -89,36 +94,14 @@ export async function POST(req: NextRequest) {
/**
* Stream back all generated tokens and steps from their runs.
*
* We do some filtering of the generated events and only stream back
* the final response as a string.
*
* For this specific type of tool calling ReAct agents with OpenAI, we can tell when
* the agent is ready to stream back final output when it no longer calls
* a tool and instead streams back content.
*
* See: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/
*/
const eventStream = await agent.streamEvents(
{ messages },
{ version: "v2" },
);

const textEncoder = new TextEncoder();
const transformStream = new ReadableStream({
async start(controller) {
for await (const { event, data } of eventStream) {
if (event === "on_chat_model_stream") {
// Intermediate chat model generations will contain tool calls and no content
if (!!data.chunk.content) {
controller.enqueue(textEncoder.encode(data.chunk.content));
}
}
}
controller.close();
},
});

return new StreamingTextResponse(transformStream);
const eventStream = agent.streamEvents({ messages }, { version: "v2" });
return LangChainAdapter.toDataStreamResponse(eventStream);
} else {
/**
* We could also pick intermediate steps out from `streamEvents` chunks, but
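The change above replaces the hand-rolled ReadableStream filtering and StreamingTextResponse with a single adapter call. A condensed, self-contained sketch of the new pattern, assuming ai v4's LangChainAdapter and the LangGraph prebuilt agent used in this route (the request shape, tool list, and prompt are illustrative):

import { LangChainAdapter } from "ai";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";

export async function POST(req: Request) {
  // Hypothetical request shape for the sketch; the real route converts the
  // incoming Vercel messages as shown in the diff above.
  const { question } = await req.json();

  const agent = createReactAgent({
    llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
    tools: [], // the real route passes its tool list here
  });

  // streamEvents (v2) yields LangChain StreamEvents; the adapter extracts the
  // chat model token chunks and returns an AI SDK data stream response.
  const eventStream = agent.streamEvents(
    { messages: [new HumanMessage(question)] },
    { version: "v2" },
  );
  return LangChainAdapter.toDataStreamResponse(eventStream);
}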
13 changes: 7 additions & 6 deletions app/api/chat/retrieval/route.ts
@@ -1,5 +1,5 @@
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { LangChainAdapter, Message as VercelChatMessage } from "ai";

import { createClient } from "@supabase/supabase-js";

@@ -140,7 +140,6 @@ export async function POST(req: NextRequest) {
chat_history: (input) => input.chat_history,
},
answerChain,
new BytesOutputParser(),
]);

const stream = await conversationalRetrievalQAChain.stream({
@@ -160,10 +159,12 @@
),
).toString("base64");

return new StreamingTextResponse(stream, {
headers: {
"x-message-index": (previousMessages.length + 1).toString(),
"x-sources": serializedSources,
return LangChainAdapter.toDataStreamResponse(stream, {
init: {
headers: {
"x-message-index": (previousMessages.length + 1).toString(),
"x-sources": serializedSources,
},
},
});
} catch (e: any) {
28 changes: 5 additions & 23 deletions app/api/chat/retrieval_agents/route.ts
@@ -1,5 +1,5 @@
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { LangChainAdapter, Message as VercelChatMessage } from "ai";

import { createClient } from "@supabase/supabase-js";

@@ -46,7 +46,7 @@ const AGENT_SYSTEM_TEMPLATE = `You are a stereotypical robot named Robbie and mu
If you don't know how to answer a question, use the available tools to look up relevant information. You should particularly do this for questions about LangChain.`;

/**
* This handler initializes and calls an tool caling ReAct agent.
* This handler initializes and calls an tool calling ReAct agent.
* See the docs for more information:
*
* https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
@@ -96,7 +96,7 @@ export async function POST(req: NextRequest) {
/**
* Use a prebuilt LangGraph agent.
*/
const agent = await createReactAgent({
const agent = createReactAgent({
llm: chatModel,
tools: [tool],
/**
@@ -112,38 +112,20 @@
/**
* Stream back all generated tokens and steps from their runs.
*
* We do some filtering of the generated events and only stream back
* the final response as a string.
*
* For this specific type of tool calling ReAct agents with OpenAI, we can tell when
* the agent is ready to stream back final output when it no longer calls
* a tool and instead streams back content.
*
* See: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/
*/
const eventStream = await agent.streamEvents(
const eventStream = agent.streamEvents(
{
messages,
},
{ version: "v2" },
);

const textEncoder = new TextEncoder();
const transformStream = new ReadableStream({
async start(controller) {
for await (const { event, data } of eventStream) {
if (event === "on_chat_model_stream") {
// Intermediate chat model generations will contain tool calls and no content
if (!!data.chunk.content) {
controller.enqueue(textEncoder.encode(data.chunk.content));
}
}
}
controller.close();
},
});

return new StreamingTextResponse(transformStream);
return LangChainAdapter.toDataStreamResponse(eventStream);
} else {
/**
* We could also pick intermediate steps out from `streamEvents` chunks, but
13 changes: 3 additions & 10 deletions app/api/chat/route.ts
@@ -1,9 +1,8 @@
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { Message as VercelChatMessage, LangChainAdapter } from "ai";

import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { HttpResponseOutputParser } from "langchain/output_parsers";

export const runtime = "edge";

@@ -47,26 +46,20 @@ export async function POST(req: NextRequest) {
model: "gpt-4o-mini",
});

/**
* Chat models stream message chunks rather than bytes, so this
* output parser handles serialization and byte-encoding.
*/
const outputParser = new HttpResponseOutputParser();

/**
* Can also initialize as:
*
* import { RunnableSequence } from "@langchain/core/runnables";
* const chain = RunnableSequence.from([prompt, model, outputParser]);
*/
const chain = prompt.pipe(model).pipe(outputParser);
const chain = prompt.pipe(model);

const stream = await chain.stream({
chat_history: formattedPreviousMessages.join("\n"),
input: currentMessageContent,
});

return new StreamingTextResponse(stream);
return LangChainAdapter.toDataStreamResponse(stream);
} catch (e: any) {
return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
}
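The plain chat route shows the same migration without an agent: the HttpResponseOutputParser is dropped because the adapter serializes the model's chunk stream itself. A condensed sketch under the same assumptions (ai v4 adapter, @langchain/openai; the prompt text and request shape are illustrative):

import { LangChainAdapter } from "ai";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";

export async function POST(req: Request) {
  const { input } = await req.json(); // hypothetical request shape for the sketch

  const prompt = PromptTemplate.fromTemplate("Answer in one sentence: {input}");
  const model = new ChatOpenAI({ model: "gpt-4o-mini" });

  // No HttpResponseOutputParser: the chain streams AIMessageChunks and
  // LangChainAdapter handles serialization into an AI SDK data stream.
  const chain = prompt.pipe(model);
  const stream = await chain.stream({ input });

  return LangChainAdapter.toDataStreamResponse(stream);
}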
1 change: 1 addition & 0 deletions app/structured_output/page.tsx
@@ -80,6 +80,7 @@ export default function AgentsPage() {
emptyStateComponent={InfoCard}
placeholder={`No matter what you type here, I'll always return the same JSON object with the same structure!`}
emoji="🧱"
streamProtocol="text"
/>
);
}
2 changes: 1 addition & 1 deletion components/ChatMessageBubble.tsx
@@ -1,5 +1,5 @@
import { cn } from "@/utils/cn";
import type { Message } from "ai/react";
import type { Message } from "@ai-sdk/react";

export function ChatMessageBubble(props: {
message: Message;
42 changes: 27 additions & 15 deletions components/ChatWindow.tsx
@@ -1,16 +1,16 @@
"use client";

import { type Message } from "ai";
import { useChat } from "ai/react";
import { UIMessage, type Message } from "ai";
import { useChat } from "@ai-sdk/react";
import { useState } from "react";
import type { FormEvent, ReactNode } from "react";
import { toast } from "sonner";
import { StickToBottom, useStickToBottomContext } from "use-stick-to-bottom";
import { ArrowDown, LoaderCircle, Paperclip } from "lucide-react";

import { ChatMessageBubble } from "@/components/ChatMessageBubble";
import { IntermediateStep } from "./IntermediateStep";
import { Button } from "./ui/button";
import { ArrowDown, LoaderCircle, Paperclip } from "lucide-react";
import { Checkbox } from "./ui/checkbox";
import { UploadDocumentsForm } from "./UploadDocumentsForm";
import {
@@ -22,6 +22,7 @@
DialogTrigger,
} from "./ui/dialog";
import { cn } from "@/utils/cn";
import { logError } from "@/utils/log";

function ChatMessages(props: {
messages: Message[];
@@ -172,6 +173,7 @@ export function ChatWindow(props: {
emoji?: string;
showIngestForm?: boolean;
showIntermediateStepsToggle?: boolean;
streamProtocol?: "text" | "data";
}) {
const [showIntermediateSteps, setShowIntermediateSteps] = useState(
!!props.showIntermediateStepsToggle,
@@ -199,16 +201,22 @@
});
}
},
streamMode: "text",
onError: (e) =>
streamProtocol: props.streamProtocol ?? undefined,
onError: (e) => {
logError("Error:", e);
toast.error(`Error while processing your request`, {
description: e.message,
}),
});
},
});

function isChatLoading(): boolean {
return chat.status === "streaming";
}

async function sendMessage(e: FormEvent<HTMLFormElement>) {
e.preventDefault();
if (chat.isLoading || intermediateStepsLoading) return;
if (isChatLoading() || intermediateStepsLoading) return;

if (!showIntermediateSteps) {
chat.handleSubmit(e);
@@ -223,6 +231,7 @@
id: chat.messages.length.toString(),
content: chat.input,
role: "user",
parts: [{ type: "text", text: chat.input }],
});
chat.setMessages(messagesWithUserReply);

@@ -237,6 +246,7 @@
setIntermediateStepsLoading(false);

if (!response.ok) {
logError("Error:", json.error);
toast.error(`Error while processing your request`, {
description: json.error,
});
@@ -251,23 +261,25 @@
(responseMessage: Message) => {
return (
(responseMessage.role === "assistant" &&
!!responseMessage.tool_calls?.length) ||
responseMessage.role === "tool"
!!responseMessage.parts?.length) ||
responseMessage.role === "system"
);
},
);

const intermediateStepMessages = [];
const intermediateStepMessages: UIMessage[] = [];
for (let i = 0; i < toolCallMessages.length; i += 2) {
const aiMessage = toolCallMessages[i];
const toolMessage = toolCallMessages[i + 1];
const content = JSON.stringify({
action: aiMessage.parts?.[0],
observation: toolMessage.content,
});
intermediateStepMessages.push({
id: (messagesWithUserReply.length + i / 2).toString(),
role: "system" as const,
content: JSON.stringify({
action: aiMessage.tool_calls?.[0],
observation: toolMessage.content,
}),
content,
parts: [{ type: "text", text: content }],
});
}
const newMessages = messagesWithUserReply;
@@ -341,7 +353,7 @@
id="show_intermediate_steps"
name="show_intermediate_steps"
checked={showIntermediateSteps}
disabled={chat.isLoading || intermediateStepsLoading}
disabled={isChatLoading() || intermediateStepsLoading}
onCheckedChange={(e) => setShowIntermediateSteps(!!e)}
/>
<label htmlFor="show_intermediate_steps" className="text-sm">
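On the client side, the hook now comes from @ai-sdk/react, loading state is derived from status rather than the old chat.isLoading check, and streamProtocol selects between data and plain-text streams. A minimal sketch of that usage, assuming the v4 useChat API exercised in this component (the component and endpoint names are illustrative):

"use client";

import { useChat } from "@ai-sdk/react";

export function MinimalChat(props: {
  endpoint: string;
  streamProtocol?: "text" | "data";
}) {
  const chat = useChat({
    api: props.endpoint,
    // "data" suits LangChainAdapter.toDataStreamResponse routes; "text" is for
    // routes that stream a plain string, like the structured output example.
    streamProtocol: props.streamProtocol ?? "data",
  });

  // The diff derives loading state from the v4 status field
  // ("submitted" | "streaming" | "ready" | "error").
  const isLoading = chat.status === "streaming";

  return (
    <form onSubmit={chat.handleSubmit}>
      <input
        value={chat.input}
        onChange={chat.handleInputChange}
        placeholder="Ask something..."
      />
      <button type="submit" disabled={isLoading}>
        {isLoading ? "Streaming..." : "Send"}
      </button>
    </form>
  );
}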
4 changes: 2 additions & 2 deletions components/IntermediateStep.tsx
@@ -1,7 +1,7 @@
import { useState } from "react";
import type { Message } from "ai/react";
import { cn } from "@/utils/cn";
import type { Message } from "@ai-sdk/react";
import { ChevronDown, ChevronUp } from "lucide-react";
import { cn } from "@/utils/cn";

export function IntermediateStep(props: { message: Message }) {
const parsedInput = JSON.parse(props.message.content);
8 changes: 4 additions & 4 deletions package.json
@@ -24,10 +24,7 @@
"@radix-ui/react-popover": "^1.1.4",
"@radix-ui/react-slot": "^1.1.1",
"@supabase/supabase-js": "^2.32.0",
"@types/node": "20.12.12",
"@types/react": "18.3.2",
"@types/react-dom": "18.3.0",
"ai": "^3.1.12",
"ai": "^4.1.41",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"langchain": "^0.3.19",
@@ -48,6 +45,9 @@
},
"devDependencies": {
"@next/bundle-analyzer": "^13.4.19",
"@types/node": "20.12.12",
"@types/react": "18.3.2",
"@types/react-dom": "18.3.0",
"autoprefixer": "10.4.14",
"eslint": "8.46.0",
"eslint-config-next": "13.4.12",