Adds Next.js backend (langchain-ai#182)
* Adds Next.js backend

* Update README

* Clarify README

* Fix typo

* Emoji
jacoblee93 authored Sep 28, 2023
1 parent e8de91a commit 1f5596b
Showing 8 changed files with 405 additions and 9,347 deletions.
35 changes: 21 additions & 14 deletions README.md
@@ -9,26 +9,33 @@ The app leverages LangChain's streaming support and async API to update the page

## ✅ Running locally
1. Install backend dependencies: `poetry install`.
1. Run `python ingest.py` to ingest LangChain docs data into the Weaviate vectorstore (only needs to be done once).
1. You can use other [Document Loaders](https://langchain.readthedocs.io/en/latest/modules/document_loaders.html) to load your own data into the vectorstore.
1. Make sure to enter your environment variables to configure the application:
```
export OPENAI_API_KEY=
export WEAVIATE_URL=
export WEAVIATE_API_KEY=
export RECORD_MANAGER_DB_URL=
# for tracing
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
export LANGCHAIN_API_KEY=
export LANGCHAIN_PROJECT=
```
1. Start the Python backend with `poetry run make start`.
1. Install frontend dependencies by running `cd chat-langchain`, then `yarn`.
1. Run the frontend with `yarn dev`.
1. Open [localhost:3000](http://localhost:3000) in your browser.

## ☕ Running locally (JS backend)
1. Follow the first three steps above to ingest LangChain docs data into the vectorstore.
1. Install frontend dependencies by running `cd chat-langchain`, then `yarn`.
1. Populate a `chat-langchain/.env.local` file with your own versions of keys from the `chat-langchain/.env.example` file, and set `NEXT_PUBLIC_API_BASE_URL` to `"http://localhost:3000/api"`.
1. Run the app with `yarn dev`.
1. Open [localhost:3000](http://localhost:3000) in your browser.
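
Once the JS backend is running, you can sanity-check it directly against the new chat route. A minimal sketch (the request shape follows `chat-langchain/app/api/chat/route.ts` below; the question text is just an example):
```
// Smoke test for the JS backend chat route (assumes the dev server is on localhost:3000).
async function smokeTestChatRoute() {
  const response = await fetch("http://localhost:3000/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      message: "What is a document loader?",
      history: [], // prior turns go here as { human: "..." } / { ai: "..." } objects
    }),
  });
  console.log(response.status); // 200 when the route and env vars are configured correctly
}
```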

## 📚 Technical description

There are two components: ingestion and question-answering.
12 changes: 12 additions & 0 deletions chat-langchain/.env.example
@@ -0,0 +1,12 @@
## For JS backend:

# LANGCHAIN_TRACING_V2=true
# LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
# LANGCHAIN_API_KEY="YOUR_LANGSMITH_KEY"
# LANGCHAIN_PROJECT="YOUR_PROJECT_NAME"

# NEXT_PUBLIC_API_BASE_URL="http://localhost:3000/api"
# OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
# WEAVIATE_HOST="YOUR_WEAVIATE_HOST"
# WEAVIATE_API_KEY="YOUR_WEAVIATE_API_KEY"
# WEAVIATE_INDEX_NAME="YOUR_WEAVIATE_INDEX_NAME"
193 changes: 193 additions & 0 deletions chat-langchain/app/api/chat/route.ts
@@ -0,0 +1,193 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import type { BaseLanguageModel } from "langchain/base_language";
import type { Document } from "langchain/document";
import type { BaseRetriever } from "langchain/schema/retriever";

import { RunnableSequence, RunnableMap } from "langchain/schema/runnable";
import { HumanMessage, AIMessage, BaseMessage } from "langchain/schema";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { StringOutputParser } from "langchain/schema/output_parser";
import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from "langchain/prompts";

import weaviate from "weaviate-ts-client";
import { WeaviateStore } from "langchain/vectorstores/weaviate";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

export const runtime = "edge";

const RESPONSE_TEMPLATE = `You are an expert programmer and problem-solver, tasked to answer any question about Langchain. Using the provided context, answer the user's question to the best of your ability using the resources provided.
Generate a comprehensive and informative answer (but no more than 80 words) for a given question based solely on the provided search results (URL and content). You must only use information from the provided search results. Use an unbiased and journalistic tone. Combine search results together into a coherent answer. Do not repeat text. Cite search results using [\${{number}}] notation. Only cite the most relevant results that answer the question accurately. Place these citations at the end of the sentence or paragraph that reference them - do not put them all at the end. If different results refer to different entities within the same name, write separate answers for each entity.
If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure." Don't try to make up an answer.
Anything between the following \`context\` html blocks is retrieved from a knowledge bank, not part of the conversation with the user.
<context>
{context}
<context/>
REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure." Don't try to make up an answer. Anything between the preceding 'context' html blocks is retrieved from a knowledge bank, not part of the conversation with the user.`;

const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone Question:`;

const getRetriever = async () => {
  const client = weaviate.client({
    scheme: "https",
    host: process.env.WEAVIATE_HOST!,
    apiKey: new weaviate.ApiKey(process.env.WEAVIATE_API_KEY!),
  });
  const vectorstore = await WeaviateStore.fromExistingIndex(new OpenAIEmbeddings({}), {
    client,
    indexName: process.env.WEAVIATE_INDEX_NAME!,
    textKey: "text",
    metadataKeys: ["source", "title"],
  });
  return vectorstore.asRetriever({ k: 6 });
};

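// Builds the retrieval step. Without chat history, the question is passed to the
// retriever as-is; with history, a condense-question chain first rewrites the
// follow-up into a standalone question before retrieving documents.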
const createRetrieverChain = (llm: BaseLanguageModel, retriever: BaseRetriever, useChatHistory: boolean) => {
  if (!useChatHistory) {
    return RunnableSequence.from([
      ({ question }) => question,
      retriever,
    ]);
  } else {
    const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(REPHRASE_TEMPLATE);
    const condenseQuestionChain = RunnableSequence.from([
      CONDENSE_QUESTION_PROMPT,
      llm,
      new StringOutputParser(),
    ]).withConfig({
      tags: ["CondenseQuestion"],
    });
    return condenseQuestionChain.pipe(retriever);
  }
};

const formatDocs = (docs: Document[]) => {
  return docs.map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`).join("\n");
};

const formatChatHistoryAsString = (history: BaseMessage[]) => {
  return history.map((message) => `${message._getType()}: ${message.content}`).join("\n");
};

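// Assembles the full answer chain: a RunnableMap gathers the formatted retrieved
// context alongside the original question and chat history, then the chat prompt,
// LLM, and string output parser synthesize the final answer.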
const createChain = (llm: BaseLanguageModel, retriever: BaseRetriever, useChatHistory: boolean) => {
  const retrieverChain = createRetrieverChain(llm, retriever, useChatHistory).withConfig({ tags: ["FindDocs"] });
  const context = new RunnableMap({
    steps: {
      context: RunnableSequence.from([
        ({ question, chat_history }) => ({ question, chat_history: formatChatHistoryAsString(chat_history) }),
        retrieverChain,
        formatDocs,
      ]),
      question: ({ question }) => question,
      chat_history: ({ chat_history }) => chat_history,
    },
  }).withConfig({ tags: ["RetrieveDocs"] });
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", RESPONSE_TEMPLATE],
    new MessagesPlaceholder("chat_history"),
    ["human", "{question}"],
  ]);

  const responseSynthesizerChain = prompt.pipe(llm).pipe(new StringOutputParser()).withConfig({
    tags: ["GenerateResponse"],
  });
  return context.pipe(responseSynthesizerChain);
};

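// Expects a JSON body of the form { message, history, conversation_id }, where
// history is an optional list of { human } / { ai } turns. Streams back
// newline-delimited JSON objects: { run_id } once, { sources } when retrieved
// documents resolve, and { tok } for each generated token.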
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const question = body.message;
    // Fall back to an empty history when none (or a non-array value) is provided.
    const chatHistory = Array.isArray(body.history) ? body.history : [];
    const conversationId = body.conversation_id;

    if (question === undefined || typeof question !== "string") {
      return NextResponse.json({ error: `Invalid "message" parameter.` }, { status: 400 });
    }

    const convertedChatHistory = [];
    for (const historyMessage of chatHistory) {
      if (historyMessage.human) {
        convertedChatHistory.push(new HumanMessage({ content: historyMessage.human }));
      } else if (historyMessage.ai) {
        convertedChatHistory.push(new AIMessage({ content: historyMessage.ai }));
      }
    }

    const metadata = { conversation_id: conversationId };
    const llm = new ChatOpenAI({
      modelName: "gpt-3.5-turbo-16k",
      temperature: 0,
    });
    const retriever = await getRetriever();
    const answerChain = createChain(llm, retriever, !!convertedChatHistory.length);

    // Narrows streamed log output down to final output and the FindDocs tagged chain to
    // selectively stream back sources.
    const stream = await answerChain.streamLog({
      question,
      chat_history: convertedChatHistory,
    }, {
      metadata,
    }, {
      includeTags: ["FindDocs"],
    });

    // Only return a selection of output to the frontend
    const textEncoder = new TextEncoder();
    const clientStream = new ReadableStream({
      async pull(controller) {
        const { value, done } = await stream.next();
        if (done) {
          controller.close();
        } else if (value) {
          let hasEnqueued = false;
          for (const op of value.ops) {
            if ("value" in op) {
              if (op.path === "/logs/0/final_output" && Array.isArray(op.value.output)) {
                const allSources = op.value.output.map((doc: Document) => {
                  return {
                    url: doc.metadata.source,
                    title: doc.metadata.title,
                  };
                });
                if (allSources.length) {
                  const chunk = textEncoder.encode(JSON.stringify({ sources: allSources }) + "\n");
                  controller.enqueue(chunk);
                  hasEnqueued = true;
                }
              } else if (op.path === "/streamed_output/-") {
                const chunk = textEncoder.encode(JSON.stringify({ tok: op.value }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              } else if (op.path === "" && op.op === "replace") {
                const chunk = textEncoder.encode(JSON.stringify({ run_id: op.value.id }) + "\n");
                controller.enqueue(chunk);
                hasEnqueued = true;
              }
            }
          }
          // Pull must always enqueue a value
          if (!hasEnqueued) {
            controller.enqueue(textEncoder.encode(""));
          }
        }
      },
    });

    return new Response(clientStream);
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
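
The newline-delimited JSON protocol above (a `run_id` object, a `sources` object, then `tok` chunks) can be consumed on the client roughly as follows. This is a hedged sketch, not the repository's actual frontend code:
```
// Sketch of a client reading the chat route's streamed output.
async function askQuestion(question: string): Promise<string> {
  const response = await fetch("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message: question, history: [] }),
  });
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  let answer = "";
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? ""; // keep any partial line for the next read
    for (const line of lines) {
      if (!line) continue;
      const chunk = JSON.parse(line);
      if (chunk.run_id) console.log("LangSmith run:", chunk.run_id);
      if (chunk.sources) console.log("Sources:", chunk.sources);
      if (chunk.tok) answer += chunk.tok;
    }
  }
  return answer;
}
```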
43 changes: 43 additions & 0 deletions chat-langchain/app/api/feedback/route.ts
@@ -0,0 +1,43 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import { Client } from "langsmith";

export const runtime = "edge";

const client = new Client();

export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const { run_id, key = "user_score", ...rest } = body;
    if (!run_id) {
      return NextResponse.json({ error: "No LangSmith run ID provided" }, { status: 400 });
    }

    await client.createFeedback(run_id, key, rest);

    return NextResponse.json({ result: "posted feedback successfully" }, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}

export async function PATCH(req: NextRequest) {
  try {
    const body = await req.json();
    const { feedback_id, score, comment } = body;
    if (feedback_id === undefined) {
      return NextResponse.json({ error: "No feedback ID provided" }, { status: 400 });
    }

    await client.updateFeedback(feedback_id, { score, comment });

    return NextResponse.json({ result: "patched feedback successfully" }, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
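
Assuming the chat route has already returned a `run_id`, the feedback endpoints could be exercised like this. A sketch only: the POST response does not include the LangSmith feedback ID, so the `feedback_id` below is a placeholder, and the score value is an assumption:
```
// Create feedback for a run, then update it with a score and comment (sketch).
async function sendFeedback(runId: string) {
  const created = await fetch("/api/feedback", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ run_id: runId, key: "user_score" }),
  });
  console.log(await created.json()); // { result: "posted feedback successfully" }

  // Updating requires the LangSmith feedback ID, which this route's POST response
  // does not return; the ID here is a placeholder.
  await fetch("/api/feedback", {
    method: "PATCH",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ feedback_id: "YOUR_FEEDBACK_ID", score: 1, comment: "Helpful answer" }),
  });
}
```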
42 changes: 42 additions & 0 deletions chat-langchain/app/api/get_trace/route.ts
@@ -0,0 +1,42 @@
// JS backend not used by default, see README for instructions.

import { NextRequest, NextResponse } from "next/server";

import { Client } from "langsmith";

export const runtime = "edge";

const client = new Client();

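// Waits for the run to appear in LangSmith, backing off quadratically
// (retryCount^2 * 100 ms) between attempts, then returns its public shared link,
// creating one via shareRun if the run has not been shared yet.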
const pollForRun = async (runId: string, retryCount = 0): Promise<string> => {
  await new Promise((resolve) => setTimeout(resolve, retryCount * retryCount * 100));
  try {
    await client.readRun(runId);
  } catch (e) {
    return pollForRun(runId, retryCount + 1);
  }
  try {
    const sharedLink = await client.readRunSharedLink(runId);
    if (!sharedLink) {
      throw new Error("Run is not shared.");
    }
    return sharedLink;
  } catch (e) {
    return client.shareRun(runId);
  }
};

export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const { run_id } = body;
    if (run_id === undefined) {
      return NextResponse.json({ error: "No run ID provided" }, { status: 400 });
    }
    const response = await pollForRun(run_id);
    return NextResponse.json(response, { status: 200 });
  } catch (e: any) {
    console.log(e);
    return NextResponse.json({ error: e.message }, { status: 500 });
  }
}
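
A client could then request a shareable trace URL for a run like so (a sketch against the handler above; the route returns the link as a JSON-encoded string):
```
// Fetch a public LangSmith trace link for a given run ID (sketch).
async function getTraceUrl(runId: string): Promise<string> {
  const response = await fetch("/api/get_trace", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ run_id: runId }),
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch trace: ${response.status}`);
  }
  return response.json();
}
```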