forked from i-am-alice/3rd-devs
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path app.ts
47 lines (38 loc) · 1.62 KB
/
app.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import express from 'express';
import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";
import { OpenAIService } from './OpenAIService';
/*
  Bootstrap the Express application: JSON body parsing, the shared
  OpenAI service instance, and the HTTP listener on port 3000.
*/
const app = express();
const port = 3000;

// Parse JSON request bodies for every route.
app.use(express.json());

// Single shared service instance used by the chat route below.
const openAIService = new OpenAIService();

app.listen(port, () => console.log(`Server running at http://localhost:${port}. Listening for POST /api/chat requests`));
/**
 * POST /api/chat — run a chat completion.
 *
 * Body: { messages: ChatCompletionMessageParam[], model?: string } (model defaults to "gpt-4o").
 * Responds 400 when messages are missing/empty or the prompt leaves no room
 * for output, 500 on unexpected failures, otherwise { role, content }.
 */
app.post('/api/chat', async (req, res) => {
  const { messages, model = "gpt-4o" }: { messages: ChatCompletionMessageParam[], model?: string } = req.body;

  // Array.isArray also rejects non-array payloads (e.g. a string), which the
  // original truthiness + .length check let through.
  if (!Array.isArray(messages) || messages.length === 0) {
    return res.status(400).json({ error: 'Messages are required' });
  }

  try {
    // NOTE(review): context window is hard-coded for gpt-4o-class models;
    // other `model` values may have a different limit — confirm per model.
    const modelContextLength = 128000;
    const maxOutputTokens = 50;

    const inputTokens = await openAIService.countTokens(messages, model);

    // Refuse prompts that leave no room in the context window for the reply.
    if (inputTokens + maxOutputTokens > modelContextLength) {
      return res.status(400).json({ error: `No space left for response. Input tokens: ${inputTokens}, Context length: ${modelContextLength}` });
    }

    console.log(`Input tokens: ${inputTokens}, Max tokens: ${maxOutputTokens}, Model context length: ${modelContextLength}, Tokens left: ${modelContextLength - (inputTokens + maxOutputTokens)}`);

    const fullResponse = await openAIService.continuousCompletion({
      messages,
      model,
      maxTokens: maxOutputTokens
    });

    res.json({
      role: "assistant",
      content: fullResponse
    });
  } catch (error) {
    // Fix: JSON.stringify(new Error(...)) yields "{}" because message/stack
    // are non-enumerable — log the stack (or the raw value) instead.
    console.error('Error:', error instanceof Error ? error.stack ?? error.message : error);
    res.status(500).json({ error: 'An error occurred while processing your request' });
  }
});