import torch
from flask import Flask, request, render_template
from transformers import AutoModelForCausalLM, AutoTokenizer

app = Flask(__name__)
model_path = "whiterabbitneo/WhiteRabbitNeo-33B-v-1"
# Load the model on the CPU (changed from GPU). Note: bitsandbytes
# 8-bit/4-bit quantization requires a CUDA device, so the load_in_8bit
# and load_in_4bit flags cannot be used in a CPU-only setup and are dropped.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="cpu",
    trust_remote_code=True,
)
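# Rough sizing note: a 33B-parameter model in float16 needs on the order of
# 66 GB of RAM for the weights alone, so CPU inference here assumes a
# large-memory host and will be slow.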
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
def generate_text(instruction):
    """Generate a completion for `instruction` and return the model's answer."""
    tokens = tokenizer.encode(instruction)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    tokens = tokens.to("cpu")  # changed to CPU
    # Sampling parameters for generation.
    instance = {
        "input_ids": tokens,
        "top_p": 1.0,
        "temperature": 0.5,
        "generate_len": 1024,
        "top_k": 50,
    }
    length = len(tokens[0])
    with torch.no_grad():
        result = model.generate(
            input_ids=tokens,
            max_length=length + instance["generate_len"],
            use_cache=True,
            do_sample=True,
            top_p=instance["top_p"],
            temperature=instance["temperature"],
            top_k=instance["top_k"],
            num_return_sequences=1,
        )
    # Drop the prompt tokens and decode only the newly generated ones.
    output = result[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    # The model may start echoing the next "USER:" turn; keep only the answer.
    answer = string.split("USER:")[0].strip()
    return answer
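
# A minimal sketch of calling generate_text directly, assuming the
# "SYSTEM: ... USER: ... ASSISTANT:" prompt template commonly used with
# WhiteRabbitNeo models (the split on "USER:" above relies on that format):
#
#   prompt = (
#       "SYSTEM: You are a helpful assistant.\n"
#       "USER: Explain how TLS certificate pinning works.\n"
#       "ASSISTANT:"
#   )
#   print(generate_text(prompt))
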
@app.route('/', methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        input_text = request.form['input_text']
        prompt = request.form['prompt']
        # Combine the prompt with the input text.
        combined_input = prompt + " " + input_text
        # Generate a continuation of the text.
        answer = generate_text(combined_input)
        return render_template('index.html', input_text=input_text, prompt=prompt, answer=answer)
    return render_template('index.html')

if __name__ == '__main__':
app.run(debug=True)
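
# Example of exercising the endpoint, assuming Flask's default host/port
# (127.0.0.1:5000) and a templates/index.html that renders the form fields
# "prompt" and "input_text" plus the returned "answer":
#
#   $ python main.py
#   $ curl http://127.0.0.1:5000/ \
#         --data-urlencode "prompt=SYSTEM: You are a helpful assistant. USER:" \
#         --data-urlencode "input_text=What is a buffer overflow? ASSISTANT:"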