forked from huangjia2019/langchain-in-action
-
Notifications
You must be signed in to change notification settings - Fork 0
/
test_00016.py
115 lines (90 loc) · 3.01 KB
/
test_00016.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
from typing import List, Literal, Sequence
from dotenv import load_dotenv # 用于加载环境变量
from langchain.globals import set_debug, set_verbose
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
trim_messages,
)
from langchain_core.messages.base import BaseMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, MessageGraph, MessagesState, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from typing_extensions import Annotated, TypedDict
# Turn on LangChain's global debug/verbose logging so every chain invocation
# prints its inputs and outputs — useful while experimenting with this demo.
set_debug(True)
set_verbose(True)
load_dotenv()  # load environment variables (e.g. OPENAI_API_KEY) from a .env file
# Chat model used both as the LLM and as the trimmer's token counter below.
model = ChatOpenAI(model="gpt-3.5-turbo")
# ref:https://python.langchain.com/docs/tutorials/chatbot/#managing-conversation-history
# Pre-configured history trimmer: keeps the conversation within a token budget
# before it is sent to the model.
trimmer = trim_messages(
    max_tokens=165,        # token budget for the retained history
    strategy="last",       # keep the most recent messages, drop the oldest
    token_counter=model,   # use the chat model's own tokenizer to count tokens
    include_system=True,   # always keep the SystemMessage even when trimming
    allow_partial=False,   # never cut a message in half to fit the budget
    start_on="human",      # trimmed history must begin with a HumanMessage
)
# Canned conversation history used to demonstrate trimming and to seed the
# graph invocations below.
messages = [
    SystemMessage(content="you're a good assistant"),
    HumanMessage(content="hi! I'm bob"),
    AIMessage(content="hi!"),
    HumanMessage(content="I like vanilla ice cream"),
    AIMessage(content="nice"),
    HumanMessage(content="whats 2 + 2"),
    AIMessage(content="4"),
    HumanMessage(content="thanks"),
    AIMessage(content="no problem!"),
    HumanMessage(content="having fun?"),
    AIMessage(content="yes!"),
]
# Demo: show which messages survive the 165-token budget.
msgObj = trimmer.invoke(messages)
print(msgObj)
class State(TypedDict):
    """Graph state: the running chat history plus the reply language."""

    # `add_messages` is a reducer: messages returned by a node are appended
    # to the existing list rather than replacing it.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Natural language the assistant should answer in (e.g. "Chinese").
    language: str
# Prompt template: a system instruction parameterized by {language}, followed
# by the (trimmed) conversation history injected via the "messages" slot.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)
# Build the graph over the State schema defined above.
workflow = StateGraph(state_schema=State)
def call_model(state: State):
    """Graph node: trim the chat history, then ask the LLM for a reply.

    Returns a partial state update; its single new message is appended to
    ``state["messages"]`` by the ``add_messages`` reducer.
    """
    # Keep only the most recent messages that fit the trimmer's token budget.
    pruned_history = trimmer.invoke(state["messages"])
    # Compose prompt -> model and run it on the pruned history.
    pipeline = prompt | model
    reply = pipeline.invoke(
        {"messages": pruned_history, "language": state["language"]}
    )
    return {"messages": [reply]}
# Wire the single-node graph: START -> "model".
# NOTE(review): the edge is declared before the node it references; LangGraph
# appears to resolve node names at compile time — confirm against the version
# in use.
workflow.add_edge(START, "model")
workflow.add_node("model", call_model)
# In-memory checkpointer: persists state per thread_id across invocations.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)
# Conversation thread "abc567": replay the canned history plus a new question.
config = {"configurable": {"thread_id": "abc567"}}
query = "What is my name?"
language = "Chinese"
input_messages = [*messages, HumanMessage(query)]
output = app.invoke({"messages": input_messages, "language": language}, config)
# Print only the model's reply (the last message in the updated state).
output["messages"][-1].pretty_print()
# Fresh thread "abc678": a different thread_id means the checkpointer starts
# from an empty state, so the canned history is supplied again.
config = {"configurable": {"thread_id": "abc678"}}
query = "What math problem did I ask?"
language = "Chinese"
input_messages = [*messages, HumanMessage(query)]
output = app.invoke({"messages": input_messages, "language": language}, config)
# Print only the model's reply (the last message in the updated state).
output["messages"][-1].pretty_print()