forked from mlc-ai/web-llm
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtypes.ts
89 lines (77 loc) · 2.35 KB
/
types.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import { AppConfig, ChatConfig } from "./config"
/**
 * Custom options that can be used to
 * override known config values.
 *
 * Accepts every field of {@link ChatConfig}, each made optional.
 */
export interface ChatOptions extends Partial<ChatConfig> {}
/**
 * Report emitted during initialization (model loading).
 */
export interface InitProgressReport {
  /** Loading progress. NOTE(review): presumably a fraction in [0, 1] — confirm with the emitter. */
  progress: number;
  /** Time elapsed since initialization started. NOTE(review): units not stated here — confirm (likely seconds). */
  timeElapsed: number;
  /** Human-readable description of the current loading stage. */
  text: string;
}
/**
 * Callback used to report initialization progress.
 *
 * @param report The {@link InitProgressReport} for the current loading step.
 */
export type InitProgressCallback = (report: InitProgressReport) => void;
/**
 * Callback used to stream intermediate results during generation.
 *
 * @param step The current generation step.
 * @param currentMessage The intermediate message generated so far.
 */
export type GenerateProgressCallback = (step: number, currentMessage: string) => void;
/**
 * Common interface of the chat module that a UI can interact with.
 */
export interface ChatInterface {
  /**
   * Set an initialization progress callback function
   * which reports the progress of model loading.
   *
   * This function can be useful to implement a UI that
   * updates while the model is loading.
   *
   * @param initProgressCallback The callback function.
   */
  setInitProgressCallback: (initProgressCallback: InitProgressCallback) => void;
  /**
   * Reload the chat with a new model.
   *
   * @param localIdOrUrl local_id of the model or model artifact url.
   * @param chatOpts Extra options to override chat behavior.
   * @param appConfig Override the app config in this load.
   * @returns A promise when reload finishes.
   * @note This is an async function.
   */
  reload: (localIdOrUrl: string, chatOpts?: ChatOptions, appConfig?: AppConfig) => Promise<void>;
  /**
   * Generate a response for a given input.
   *
   * @param input The input prompt.
   * @param progressCallback Callback that is called to stream intermediate results.
   * @param streamInterval Interval (in steps) at which progressCallback is invoked.
   * @returns The final result.
   */
  generate: (
    input: string,
    progressCallback?: GenerateProgressCallback,
    streamInterval?: number,
  ) => Promise<string>;
  /**
   * @returns A text summarizing the runtime stats.
   * @note This is an async function.
   */
  runtimeStatsText: () => Promise<string>;
  /**
   * Interrupt the generate process if it is already running.
   */
  interruptGenerate: () => void;
  /**
   * Explicitly unload the current model and release the related resources.
   */
  unload: () => Promise<void>;
  /**
   * Reset the current chat session by clearing all memories.
   */
  resetChat: () => Promise<void>;
}