llm_local_functions.py (forked from raddka/terminal-llm)
import os, sys, re, csv, json
from modules.functions import *
from modules.helpers import *
from print_color import print
from llama_cpp import Llama
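# Note: print_color's print replaces the builtin print, which is what enables
# the tag/tag_color/color keyword arguments used throughout this script.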
# Model selection and initialization
model_name = model_selector()
n_gpu, n_context = llama_args()
llm = Llama(model_path="./models/" + model_name, chat_format="llama-2",
            n_gpu_layers=n_gpu, n_ctx=n_context)
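# (n_gpu_layers sets how many model layers llama.cpp offloads to the GPU;
# n_ctx sets the context window size in tokens.)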
# Character selection + history init
llm_name = char_selector()
with open('llm_config.json', 'r') as file:
    llm_config = json.load(file)
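# llm_config.json is expected to provide the sampling parameters read below.
# Illustrative shape only (these values are assumptions, not from the source):
# {"max_tokens": 256, "stop": ["User:"], "temperature": 0.7,
#  "top_p": 0.9, "top_k": 40, "min_p": 0.05, "repeat_penalty": 1.1}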
# Load existing history
history_path = os.path.join("history", f'history_{llm_name}.csv')
try:
    with open(history_path, 'r', newline='') as file:
        reader = csv.DictReader(file)
        history_dict = list(reader)
    # Rebuild the prompt history: re-render each saved row's column/value
    # pairs through prompter(). (The accumulator must be initialized once,
    # before the loop, not reset on every iteration.)
    history = ''
    for entry in history_dict:
        for key, value in entry.items():
            history = history + prompter(key, value)
except FileNotFoundError:
    history = ''
    history_dict = []
# Chat loop
while True:
    role_select = input("Select role - system/user :> ")
    if role_select == 'system':
        system_message = input('System:> ')
        history, history_dict = history_update_print('system', history, history_dict, system_message)
    user_message = input("User:> ")
    if user_message == 'exit':
        sys.exit(0)
    history, history_dict = history_update_print('user', history, history_dict, user_message)
    output = llm(prompt=promp_generator(history),
                 max_tokens=llm_config["max_tokens"],
                 stop=llm_config["stop"],
                 temperature=llm_config["temperature"],
                 top_p=llm_config["top_p"],
                 top_k=llm_config["top_k"],
                 min_p=llm_config["min_p"],
                 repeat_penalty=llm_config["repeat_penalty"])
    assistant_message = output["choices"][0]["text"]
    history, history_dict = history_update_print('assistant', history, history_dict, assistant_message, True, llm_name)
    print(assistant_message, tag=llm_name, tag_color='magenta', color='cyan')
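    # The model is assumed to request a tool call with a message such as
    # "/function [function_name] [arg1, arg2]" (convention inferred from the
    # parsing below; it is not documented in the source).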
    if '/function' in assistant_message:
        matches = re.findall(r'\[([^]]+)\]', assistant_message)
        function_name = matches[0]
        try:
            args_str = matches[1]
            if function_name in globals() and callable(globals()[function_name]):
                func_to_call = globals()[function_name]
                args = [arg.strip() for arg in args_str.split(',')]
                response = func_to_call(*args)
            else:
                response = 'Function not found'
        except IndexError:
            # No bracketed argument list: call the function with no arguments
            if function_name in globals() and callable(globals()[function_name]):
                func_to_call = globals()[function_name]
                response = func_to_call()
            else:
                response = 'Function not found'
        # Record the function's result in the history as a system message
        history, history_dict = history_update_print('system', history, history_dict, str(response))
        print(response, tag='System', tag_color='yellow', color='white')
        # Generate a follow-up reply that incorporates the function's result
        output = llm(prompt=promp_generator(history),
                     max_tokens=llm_config["max_tokens"],
                     stop=llm_config["stop"],
                     temperature=llm_config["temperature"],
                     top_p=llm_config["top_p"],
                     top_k=llm_config["top_k"],
                     min_p=llm_config["min_p"],
                     repeat_penalty=llm_config["repeat_penalty"])
        assistant_message = output["choices"][0]["text"]
        history, history_dict = history_update_print('assistant', history, history_dict, assistant_message, True, llm_name)
        print(assistant_message, tag=llm_name, tag_color='magenta', color='cyan')
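# Illustrative only (not from the source): any callable visible in this
# module's globals() (e.g. one defined in modules.functions) can be
# dispatched by the loop above. A hypothetical helper such as
#
#     def echo(text):
#         return 'echo: ' + text
#
# would run when the model replies with "/function [echo] [hello]".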