Commit 6e0dc08: Update the example

tqchen committed May 25, 2023
1 parent 5075732 commit 6e0dc08

Showing 6 changed files with 114 additions and 50 deletions.

50 changes: 23 additions & 27 deletions README.md

@@ -18,55 +18,51 @@ our companion project that runs LLMs natively on iPhone and other native local environments
## Get Started

WebLLM offers a minimalist and modular interface to access the chatbot in the browser.
The following code demonstrates the basic usage.

```typescript
import { ChatModule } from "@mlc-ai/web-llm";

async function main() {
const chat = new ChatModule();
// load a prebuilt model
await chat.reload("RedPajama-INCITE-Chat-3B-v1-q4f32_0");
// generate a reply based on the input
const prompt = "What is the capital of Canada?";
const reply = await chat.generate(prompt);
console.log(reply);
}

main();
```
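
As the comments in `get_started.ts` note, `reload` can be called again at any point to switch the underlying model. A minimal sketch of that pattern, assuming both prebuilt model IDs used elsewhere in this commit are available:

```typescript
import { ChatModule } from "@mlc-ai/web-llm";

// A sketch of switching models at runtime: reload can be called
// again at any point to swap the underlying model.
async function switchModels() {
  const chat = new ChatModule();
  // Load one prebuilt model and ask a question...
  await chat.reload("RedPajama-INCITE-Chat-3B-v1-q4f32_0");
  console.log(await chat.generate("What is the capital of Canada?"));
  // ...then switch to another prebuilt model with a second reload.
  await chat.reload("vicuna-v1-7b-q4f32_0");
  console.log(await chat.generate("What is the capital of Canada?"));
}

switchModels();
```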

The WebLLM package itself does not come with a UI, and is designed in a
modular way to hook into any of the UI components. The following code snippet
-contains part of the program that generates a streaming response on a webpage.
+demonstrates a simple example that generates a streaming response on a webpage.
You can check out [examples/get-started](examples/get-started/) to see the complete example.

```typescript
import * as webllm from "@mlc-ai/web-llm";

// We use a label to intentionally keep it simple
function setLabel(id: string, text: string) {
const label = document.getElementById(id);
if (label == null) {
throw Error("Cannot find label " + id);
}
label.innerText = text;
}

async function main() {
// create a ChatModule
-const chat = new ChatModule();
+const chat = new webllm.ChatModule();
// This callback allows us to report initialization progress
-chat.setInitProgressCallback((report: InitProgressReport) => {
+chat.setInitProgressCallback((report: webllm.InitProgressReport) => {
setLabel("init-label", report.text);
});
-// pick a model. Here we use red-pajama
-const localId = "RedPajama-INCITE-Chat-3B-v1-q4f32_0";
-await chat.reload(localId);
+// You can also try out "RedPajama-INCITE-Chat-3B-v1-q4f32_0"
+await chat.reload("vicuna-v1-7b-q4f32_0");

// callback to refresh the streaming response
const generateProgressCallback = (_step: number, message: string) => {
setLabel("generate-label", message);
};

const prompt0 = "What is the capital of Canada?";
// generate response
setLabel("prompt-label", prompt0);
const reply0 = await chat.generate(prompt0, generateProgressCallback);
console.log(reply0);

const prompt1 = "How about France?";
const reply1 = await chat.generate(prompt1, generateProgressCallback)
const prompt1 = "Can you write a poem about it?";
setLabel("prompt-label", prompt1);
const reply1 = await chat.generate(prompt1, generateProgressCallback);
console.log(reply1);

// We can print out the status
console.log(await chat.runtimeStatsText());
}

main();
```
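
The snippet assumes the page already provides elements with ids `init-label`, `prompt-label`, and `generate-label`. A minimal sketch of creating them from script, in case you are wiring the example into a bare page (the actual example ships its own HTML):

```typescript
// Create the label elements that setLabel writes into.
// The ids mirror the setLabel calls in the snippet above.
for (const id of ["init-label", "prompt-label", "generate-label"]) {
  const label = document.createElement("p");
  label.id = id;
  document.body.appendChild(label);
}
```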

Finally, you can find a complete
2 changes: 1 addition & 1 deletion examples/get-started/README.md
@@ -7,7 +7,7 @@ To try it out, you can do the following steps
- `@mlc-ai/web-llm` points to a valid npm version e.g.
```js
"dependencies": {
"@mlc-ai/web-llm": "^0.1.0"
"@mlc-ai/web-llm": "^0.1.3"
}
```
Try this option if you would like to use WebLLM without building it yourself.
27 changes: 9 additions & 18 deletions examples/get-started/src/get_started.ts
@@ -1,6 +1,5 @@
-import { ChatModule, InitProgressReport } from "@mlc-ai/web-llm";
+import * as webllm from "@mlc-ai/web-llm";

-// We use label to intentionally keep it simple
function setLabel(id: string, text: string) {
const label = document.getElementById(id);
if (label == null) {
@@ -10,37 +9,29 @@ function setLabel(id: string, text: string) {
}

async function main() {
-// create a ChatModule,
-const chat = new ChatModule();
+const chat = new webllm.ChatModule();

// This callback allows us to report initialization progress
-chat.setInitProgressCallback((report: InitProgressReport) => {
+chat.setInitProgressCallback((report: webllm.InitProgressReport) => {
setLabel("init-label", report.text);
});
-// pick a model, here we use red-pajama
-// at any time point, you can call reload
-// to switch the underlying model
-const localId = "RedPajama-INCITE-Chat-3B-v1-q4f32_0";
-await chat.reload(localId);

-// this callback allows us to stream result back
+await chat.reload("vicuna-v1-7b-q4f32_0");

const generateProgressCallback = (_step: number, message: string) => {
setLabel("generate-label", message);
};

const prompt0 = "What is the capital of Canada?";
setLabel("prompt-label", prompt0);

// generate response
const reply0 = await chat.generate(prompt0, generateProgressCallback);
console.log(reply0);

const prompt1 = "How about France?";
const prompt1 = "Can you write a poem about it?";
setLabel("prompt-label", prompt1);
-const reply1 = await chat.generate(prompt1, generateProgressCallback)
+const reply1 = await chat.generate(prompt1, generateProgressCallback);
console.log(reply1);

// We can print out the status
console.log(await chat.runtimeStatsText());
}

-main()
+main();
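
The final `runtimeStatsText` call resolves to a human-readable string that the example simply logs. It can just as well be surfaced on the page through the same label pattern; a small sketch under that assumption (the `stats-label` id is hypothetical):

```typescript
import * as webllm from "@mlc-ai/web-llm";

// Show runtime statistics on the page instead of the console.
// Assumes an element with id "stats-label" exists; the id is hypothetical.
async function showStats(chat: webllm.ChatModule) {
  const label = document.getElementById("stats-label");
  if (label !== null) {
    label.innerText = await chat.runtimeStatsText();
  }
}
```
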
2 changes: 1 addition & 1 deletion examples/simple-chat/README.md
@@ -7,7 +7,7 @@ chat app based on WebLLM. To try it out, you can do the following steps
- Option 1: `@mlc-ai/web-llm` points to a valid npm version e.g.
```js
"dependencies": {
"@mlc-ai/web-llm": "^0.1.0"
"@mlc-ai/web-llm": "^0.1.3"
}
```
Try this option if you would like to use WebLLM.
75 changes: 75 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

8 changes: 5 additions & 3 deletions package.json
@@ -22,18 +22,20 @@
"license": "Apache-2.0",
"homepage": "https://github.com/mlc-ai/web-llm",
"devDependencies": {
"@mlc-ai/web-tokenizers": "^0.1.0",
"@rollup/plugin-commonjs": "^20.0.0",
"@rollup/plugin-node-resolve": "^13.0.4",
"@typescript-eslint/eslint-plugin": "^5.59.6",
"@typescript-eslint/parser": "^5.59.6",
"@webgpu/types": "^0.1.24",
"buffer": "^5.7.1",
"eslint": "^8.41.0",
"process": "^0.11.10",
"rollup": "^2.56.2",
"rollup-plugin-ignore": "^1.0.10",
"rollup-plugin-typescript2": "^0.34.1",
"tslib": "^2.3.1",
"@webgpu/types": "^0.1.24",
"tvmjs": "file:./tvm_home/web",
"typescript": "^4.9.5",
"@mlc-ai/web-tokenizers": "^0.1.0"
"typescript": "^4.9.5"
}
}
