inference.py (forked from abacaj/replit-3B-inference)
import os
from dataclasses import dataclass, asdict
from ctransformers import AutoModelForCausalLM, AutoConfig
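
# Minimal CPU chat loop for a GGML-quantized Replit-v2-CodeInstruct-3B model,
# loaded through ctransformers. The quantized weights are assumed to live at
# models/replit-v2-codeinstruct-3b.q4_1.bin and must be downloaded separately.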


@dataclass
class GenerationConfig:
    temperature: float
    top_k: int
    top_p: float
    repetition_penalty: float
    max_new_tokens: int
    seed: int
    reset: bool
    stream: bool
    threads: int
    stop: list[str]
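
# Note: the fields above map onto the keyword arguments accepted by the
# ctransformers model call; generate() unpacks an instance with asdict().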


def format_prompt(user_prompt: str):
    return f"""### Instruction:
{user_prompt}

### Response:"""


def generate(
    llm: AutoModelForCausalLM,
    generation_config: GenerationConfig,
    user_prompt: str,
):
    """Run model inference; returns a generator when stream=True."""
    return llm(
        format_prompt(user_prompt),
        **asdict(generation_config),
    )


if __name__ == "__main__":
    config = AutoConfig.from_pretrained(
        "teknium/Replit-v2-CodeInstruct-3B", context_length=2048
    )
    llm = AutoModelForCausalLM.from_pretrained(
        os.path.abspath("models/replit-v2-codeinstruct-3b.q4_1.bin"),
        model_type="replit",
        config=config,
    )

    generation_config = GenerationConfig(
        temperature=0.2,
        top_k=50,
        top_p=0.9,
        repetition_penalty=1.0,
        max_new_tokens=512,  # adjust as needed
        seed=42,
        reset=True,  # reset history (cache)
        stream=True,  # stream tokens as they are generated
        threads=max(os.cpu_count() // 6, 1),  # adjust for your CPU; never drop below one thread
        stop=["<|endoftext|>"],
    )

    user_prefix = "[user]: "
    assistant_prefix = "[assistant]:"

    while True:
        user_prompt = input(user_prefix)
        generator = generate(llm, generation_config, user_prompt.strip())
        print(assistant_prefix, end="\n", flush=True)
        for word in generator:
            print(word, end="", flush=True)
        print("")
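
# Usage (assuming the weights are already in models/):
#   $ python inference.py
#   [user]: <type a coding instruction and press Enter>
#   [assistant]: <streamed completion>
# The loop has no quit command; exit with Ctrl+C.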