llms.py
# Builds a kor extraction chain on top of a Vertex AI LLM to parse music-player
# commands (song, album, artist, action) out of a free-form user message.
import os

from dotenv import load_dotenv
from kor.extraction import create_extraction_chain
from kor.nodes import Object, Text
from langchain.llms import VertexAI

load_dotenv()

# Earlier PaLM chat implementation, kept commented out for reference:
# import google.generativeai as palm
#
# def get_response(message: str) -> str:
#     palm.configure(api_key=os.environ["PALM_APIKEY"])
#     response = palm.chat(context="", messages=[message])
#     return response.last
def get_response(message: str) -> str:
    # Vertex AI text model used as the backing LLM for extraction.
    llm = VertexAI()

    # kor schema describing the fields to extract from a music-player command.
    schema = Object(
        id="player",
        description=(
            "User is controlling a music player to select songs, pause or start them,"
            " or play music by a particular artist."
        ),
        attributes=[
            Text(
                id="song",
                description="User wants to play this song",
                examples=[],
                many=True,
            ),
            Text(
                id="album",
                description="User wants to play this album",
                examples=[],
                many=True,
            ),
            Text(
                id="artist",
                description="Music by the given artist",
                examples=[("Songs by paul simon", "paul simon")],
                many=True,
            ),
            Text(
                id="action",
                description="Action to take, one of: `play`, `stop`, `next`, `previous`.",
                examples=[
                    ("Please stop the music", "stop"),
                    ("play something", "play"),
                    ("play a song", "play"),
                    ("next song", "next"),
                ],
            ),
        ],
        many=True,
    )

    chain = create_extraction_chain(llm, schema, encoder_or_encoder_class="json")
    # Assumed completion: the source file ends right after building the chain, so
    # running it on the message and returning the parsed "data" field is a guess
    # at the intended behaviour.
    result = chain.run(message)
    return str(result["data"])
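
# Usage sketch (not part of the original file): a minimal way to exercise
# get_response locally. It assumes Vertex AI credentials are already configured
# in the environment loaded by load_dotenv(); the sample message is illustrative.
if __name__ == "__main__":
    # Example command a user might issue to the music player.
    print(get_response("play something by paul simon and then stop the music"))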