feat: generation works
zanussbaum committed Mar 25, 2023
1 parent 897d818 commit b6e3ba0
Showing 3 changed files with 82 additions and 0 deletions.
25 changes: 25 additions & 0 deletions README.md
@@ -1 +1,26 @@
# gpt4all



# Setup

Clone the repo

`git clone --recurse-submodules git@github.com:nomic-ai/gpt4all.git`

Setup the environment

```
python -m pip install -r requirements.txt
cd transformers
pip install -e .
cd ../peft
pip install -e .
```


## Generate

`python generate.py --config configs/generate/generate.yaml --prompt "Write a script to reverse a string in Python"`
8 changes: 8 additions & 0 deletions configs/generate/generate.yaml
@@ -0,0 +1,8 @@
# model/tokenizer
model_name: "zpn/llama-7b"
tokenizer_name: "zpn/llama-7b"
lora: true
lora_path: "nomic-ai/vicuna-lora-512"

max_new_tokens: 512
temperature: 0
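
generate.py (the next file in this commit) imports `read_config` from `read`, but read.py is not among the three files changed here. A minimal sketch, assuming it simply parses the YAML config above into a dict:

```
# read.py -- not part of this commit; assumed to load the YAML config into a plain dict
import yaml

def read_config(path):
    with open(path) as f:
        return yaml.safe_load(f)  # keys like model_name, lora_path, max_new_tokens, temperature
```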
49 changes: 49 additions & 0 deletions generate.py
@@ -0,0 +1,49 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModelForCausalLM
from read import read_config
from argparse import ArgumentParser
import torch
import time


def generate(tokenizer, prompt, model, config):
    # tokenize the prompt and move it onto the model's device
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    outputs = model.generate(input_ids=input_ids, max_new_tokens=config["max_new_tokens"], temperature=config["temperature"])

    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return decoded[len(prompt):]  # strip the echoed prompt, return only the completion


def setup_model(config):
    # load the base model in fp16, sharded automatically across available devices
    model = AutoModelForCausalLM.from_pretrained(config["model_name"], device_map="auto", torch_dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])

    if config["lora"]:
        # apply the LoRA adapter weights on top of the base model
        model = PeftModelForCausalLM.from_pretrained(model, config["lora_path"], device_map="auto", torch_dtype=torch.float16)
        model.to(dtype=torch.float16)

    print(f"Mem needed: {model.get_memory_footprint() / 1024 / 1024 / 1024:.2f} GB")

    return model, tokenizer



if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    parser.add_argument("--prompt", type=str, required=True)

    args = parser.parse_args()

    config = read_config(args.config)

    print("setting up model")
    model, tokenizer = setup_model(config)

    print("generating")
    start = time.time()
    generation = generate(tokenizer, args.prompt, model, config)
    print(f"done in {time.time() - start:.2f}s")
    print(generation)
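
Beyond the CLI entry point above, the same helpers can also be driven directly from Python; a minimal sketch reusing only the functions defined in this commit, with the config path and prompt taken from the README:

```
# sketch: call setup_model/generate from generate.py without going through argparse
from read import read_config
from generate import setup_model, generate

config = read_config("configs/generate/generate.yaml")
model, tokenizer = setup_model(config)
print(generate(tokenizer, "Write a script to reverse a string in Python", model, config))
```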
