Increase recordings and adjust OpenAI parameters
- Add GPT3Tokenizer library for tokenization
- Add a default response when GPT response is empty
- Add a check for blocked words in GPT response
- Change temperature parameter to `0.6`
- Add error handling for OpenAI API request
- Return OpenAI response as a string
- Add check for token limit before adding a user or assistant message
- Delete messages when token limit is exceeded

[src/utils.ts]
- Add the GPT3Tokenizer library for tokenization
- Add a function to calculate the number of tokens in a chat message
- Add a function to check if the number of tokens exceeds the limit for the current model
[src/bot.ts]
- Add a default response when the GPT response is empty
- Add a check for blocked words in the GPT response
[src/openai.ts]
- Change the temperature parameter to `0.6`
- Add error handling for the OpenAI API request
- Return the OpenAI response as a string
[package.json]
- Add `gpt3-tokenizer` package
[src/data.ts]
- Add a check for token limit before adding a user or assistant message
- Delete messages starting from the second one if token limit is exceeded
- Import `isTokenOverLimit` from `./utils.js`
- Remove initialization of `initState`
[package-lock.json]
- Add `gpt3-tokenizer` package
- Add `array-keyed-map` package
- Increase version of `dotenv` package
- Increase version of `openai` package
- Increase version of `google-protobuf` package
- Increase returned recordings from `10` to `100`
RealTong committed Mar 23, 2023
1 parent 68fe980 commit 4d39ea4
Showing 6 changed files with 89 additions and 13 deletions.
30 changes: 30 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -14,6 +14,7 @@
"async-retry": "^1.3.3",
"dotenv": "^16.0.3",
"execa": "^6.1.0",
"gpt3-tokenizer": "^1.1.5",
"openai": "^3.2.1",
"qrcode": "^1.5.1",
"uuid": "^9.0.0",
7 changes: 5 additions & 2 deletions src/bot.ts
@@ -130,8 +130,11 @@ export class ChatGPTBot {
}
async getGPTMessage(talkerName: string,text: string): Promise<string> {
let gptMessage = await chatgpt(talkerName,text);
DBUtils.addAssistantMessage(talkerName,gptMessage);
return gptMessage;
if (gptMessage !== "") {
DBUtils.addAssistantMessage(talkerName,gptMessage);
return gptMessage;
}
return "Sorry, please try again later. 😔";
}
// Check if the message returned by chatgpt contains blocked words
checkChatGPTBlockWords(message: string): boolean {
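
The body of checkChatGPTBlockWords is collapsed in this diff. A minimal sketch of such a check, assuming the blocked words are available as a string array (the project's actual config shape may differ):

// Hypothetical sketch: the committed implementation is not shown in this diff.
// Assumes a plain string array of blocked words; the real source of the list
// (e.g. a config entry) may differ.
const blockWords: string[] = ["example-blocked-word"];

function checkChatGPTBlockWords(message: string): boolean {
  return blockWords.some((word) => message.includes(word));
}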
15 changes: 9 additions & 6 deletions src/data.ts
@@ -1,15 +1,10 @@
import {ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum} from "openai";
import {User} from "./interface";
import {isTokenOverLimit} from "./utils.js";

/**
* Use in-memory storage as the database
*/
export const initState: Array<ChatCompletionRequestMessage> = new Array(
{
"role": ChatCompletionRequestMessageRoleEnum.System,
"content": "You are a helpful assistant."
}
)

class DB {
private static data: User[] = [];
@@ -75,6 +70,10 @@ class DB {
public addUserMessage(username: string, message: string): void {
const user = this.getUserByUsername(username);
if (user) {
while (isTokenOverLimit(user.chatMessage)) {
// Delete messages starting from the second one (the first is the system prompt)
user.chatMessage.splice(1,1);
}
user.chatMessage.push({
role: ChatCompletionRequestMessageRoleEnum.User,
content: message,
@@ -90,6 +89,10 @@
public addAssistantMessage(username: string, message: string): void {
const user = this.getUserByUsername(username);
if (user) {
while (isTokenOverLimit(user.chatMessage)) {
// Delete messages starting from the second one (the first is the system prompt)
user.chatMessage.splice(1,1);
}
user.chatMessage.push({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: message,
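
Both loops above share the same trimming idea: drop the oldest non-prompt message until the history fits. As a standalone sketch (illustrative only; it adds a length guard that the committed loops omit):

import { ChatCompletionRequestMessage } from "openai";
import { isTokenOverLimit } from "./utils.js";

// Remove the oldest non-prompt messages until the history fits the token limit.
// Index 0 holds the system prompt, so deletion always starts at index 1.
function trimToTokenLimit(chatMessage: ChatCompletionRequestMessage[]): void {
  while (chatMessage.length > 1 && isTokenOverLimit(chatMessage)) {
    chatMessage.splice(1, 1);
  }
}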
18 changes: 13 additions & 5 deletions src/openai.ts
@@ -25,12 +25,20 @@ async function chatgpt(username:string,message: string): Promise<string> {
model: "gpt-3.5-turbo",
messages: messages,
temperature: 0.6
}).then((res) => res.data).catch((err) => console.log(err));
if (response) {
return (response.choices[0].message as any).content.replace(/^\n+|\n+$/g, "");
} else {
return "Something went wrong"
});
let assistantMessage = "";
try {
if (response.status === 200) {
assistantMessage = response.data.choices[0].message?.content.replace(/^\n+|\n+$/g, "") as string;
} else {
console.log(`Something went wrong, Code: ${response.status}, ${response.statusText}`)
}
} catch (e: any) {
if (e.request) {
console.log("Request error");
}
}
return assistantMessage;
}

/**
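
Note that the committed try/catch wraps only the status check, so a rejected request would still throw from the await above it. A sketch that also catches request failures, assuming the axios-based openai v3 client instance already defined in this file:

// Sketch only (not the committed code). The v3 client is axios-based, so it
// throws for both non-2xx responses and network failures; one catch covers both.
async function chatgptRequest(messages: ChatCompletionRequestMessage[]): Promise<string> {
  try {
    const response = await openai.createChatCompletion({
      model: "gpt-3.5-turbo",
      messages: messages,
      temperature: 0.6,
    });
    return response.data.choices[0].message?.content.replace(/^\n+|\n+$/g, "") ?? "";
  } catch (e: any) {
    console.log(e.response ? `Request failed, Code: ${e.response.status}` : "Request error");
    return "";
  }
}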
31 changes: 31 additions & 0 deletions src/utils.ts
@@ -0,0 +1,31 @@
import {ChatCompletionRequestMessage} from "openai";
import GPT3TokenizerImport from 'gpt3-tokenizer';
import {config} from "./config.js";

const GPT3Tokenizer: typeof GPT3TokenizerImport =
typeof GPT3TokenizerImport === 'function'
? GPT3TokenizerImport
: (GPT3TokenizerImport as any).default;

// https://github.com/chathub-dev/chathub/blob/main/src/app/bots/chatgpt-api/usage.ts
const tokenizer = new GPT3Tokenizer({ type: 'gpt3' })
function calTokens(chatMessage:ChatCompletionRequestMessage[]):number {
let count = 0
for (const msg of chatMessage) {
count += countTokens(msg.content)
count += countTokens(msg.role)
}
return count + 2
}
function countTokens(str: string):number {
const encoded = tokenizer.encode(str)
return encoded.bpe.length
}

export function isTokenOverLimit(chatMessage:ChatCompletionRequestMessage[]): boolean {
let limit = 4096;
if (config.model==="gpt-3.5-turbo" || config.model==="gpt-3.5-turbo-0301") {
limit = 4096;
}
return calTokens(chatMessage) > limit;
}
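
A quick usage sketch for the new helper (hypothetical caller code, not part of this commit):

import { ChatCompletionRequestMessageRoleEnum } from "openai";
import { isTokenOverLimit } from "./utils.js";

const history = [
  { role: ChatCompletionRequestMessageRoleEnum.System, content: "You are a helpful assistant." },
  { role: ChatCompletionRequestMessageRoleEnum.User, content: "Hello!" },
];
// Well under the 4096-token limit for gpt-3.5-turbo, so this prints false.
console.log(isTokenOverLimit(history));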
