Skip to content

Commit

Permalink
feat: fixes and documenting methods
Browse files Browse the repository at this point in the history
  • Loading branch information
transitive-bullshit committed Dec 7, 2022
1 parent 4693de9 commit 58795f4
Show file tree
Hide file tree
Showing 6 changed files with 141 additions and 34 deletions.
9 changes: 5 additions & 4 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
"dependencies": {
"eventsource-parser": "^0.0.5",
"expiry-map": "^2.0.0",
"p-timeout": "^6.0.0",
"remark": "^14.0.2",
"strip-markdown": "^5.0.0",
"uuid": "^9.0.0"
Expand All @@ -62,6 +63,9 @@
"typedoc-plugin-markdown": "^3.13.6",
"typescript": "^4.9.3"
},
"optionalDependencies": {
"undici": "^5.13.0"
},
"lint-staged": {
"*.{ts,tsx}": [
"prettier --write"
Expand Down Expand Up @@ -89,8 +93,5 @@
"ai",
"ml",
"bot"
],
"optionalDependencies": {
"undici": "^5.13.0"
}
]
}
7 changes: 7 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

51 changes: 48 additions & 3 deletions src/chatgpt-api.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,14 @@ test('ChatGPTAPI valid session token', async (t) => {

await t.notThrowsAsync(
(async () => {
const api = new ChatGPTAPI({ sessionToken: process.env.SESSION_TOKEN })
const chatgpt = new ChatGPTAPI({
sessionToken: process.env.SESSION_TOKEN
})

// Don't make any real API calls using our session token if we're running on CI
if (!isCI) {
await api.ensureAuth()
const response = await api.sendMessage('test')
await chatgpt.ensureAuth()
const response = await chatgpt.sendMessage('test')
console.log('chatgpt response', response)

t.truthy(response)
Expand All @@ -68,3 +70,46 @@ if (!isCI) {
)
})
}

if (!isCI) {
test('ChatGPTAPI timeout', async (t) => {
t.timeout(30 * 1000) // 30 seconds

await t.throwsAsync(
async () => {
const chatgpt = new ChatGPTAPI({
sessionToken: process.env.SESSION_TOKEN
})

await chatgpt.sendMessage('test', {
timeoutMs: 1
})
},
{
message: 'ChatGPT timed out waiting for response'
}
)
})

test('ChatGPTAPI abort', async (t) => {
t.timeout(30 * 1000) // 30 seconds

await t.throwsAsync(
async () => {
const chatgpt = new ChatGPTAPI({
sessionToken: process.env.SESSION_TOKEN
})

const abortController = new AbortController()
setTimeout(() => abortController.abort(new Error('testing abort')), 10)

await chatgpt.sendMessage('test', {
abortSignal: abortController.signal
})
},
{
message: 'testing abort'
}
)
})
}
102 changes: 77 additions & 25 deletions src/chatgpt-api.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import ExpiryMap from 'expiry-map'
import pTimeout, { TimeoutError } from 'p-timeout'
import { v4 as uuidv4 } from 'uuid'

import * as types from './types'
Expand All @@ -18,8 +19,9 @@ export class ChatGPTAPI {
protected _backendApiBaseUrl: string
protected _userAgent: string

// stores access tokens for up to 10 seconds before needing to refresh
protected _accessTokenCache = new ExpiryMap<string, string>(10 * 1000)
// Stores access tokens for `accessTokenTTL` milliseconds before needing to refresh
// (defaults to 60 seconds)
protected _accessTokenCache: ExpiryMap<string, string>

/**
* Creates a new client wrapper around the unofficial ChatGPT REST API.
Expand All @@ -28,6 +30,7 @@ export class ChatGPTAPI {
* @param apiBaseUrl - Optional override; the base URL for ChatGPT webapp's API (`/api`)
* @param backendApiBaseUrl - Optional override; the base URL for the ChatGPT backend API (`/backend-api`)
* @param userAgent - Optional override; the `user-agent` header to use with ChatGPT requests
* @param accessTokenTTL - Optional override; how long in milliseconds access tokens should last before being forcefully refreshed
*/
constructor(opts: {
sessionToken: string
Expand All @@ -43,13 +46,17 @@ export class ChatGPTAPI {

/** @defaultValue `'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'` **/
userAgent?: string

/** @defaultValue 60000 (60 seconds) */
accessTokenTTL?: number
}) {
const {
sessionToken,
markdown = true,
apiBaseUrl = 'https://chat.openai.com/api',
backendApiBaseUrl = 'https://chat.openai.com/backend-api',
userAgent = USER_AGENT
userAgent = USER_AGENT,
accessTokenTTL = 60000 // 60 seconds
} = opts

this._sessionToken = sessionToken
Expand All @@ -58,31 +65,26 @@ export class ChatGPTAPI {
this._backendApiBaseUrl = backendApiBaseUrl
this._userAgent = userAgent

this._accessTokenCache = new ExpiryMap<string, string>(accessTokenTTL)

if (!this._sessionToken) {
throw new Error('ChatGPT invalid session token')
}
}

  /**
   * @returns `true` if refreshing the access token succeeds, otherwise `false`.
   */
  async getIsAuthenticated() {
    try {
      // `void` signals we only care about success/failure here, not the token
      void (await this.refreshAccessToken())
      return true
    } catch (err) {
      return false
    }
  }

  /**
   * Ensures the client is authenticated by refreshing the access token;
   * rejects if the session is no longer valid.
   */
  async ensureAuth() {
    return await this.refreshAccessToken()
  }

/**
* Sends a message to ChatGPT, waits for the response to resolve, and returns
* the response.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
* If you want to receive the full response, including message and conversation IDs,
* you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`
* helper.
*
* @param message - The prompt message to send
* @param opts.conversationId - Optional ID of a conversation to continue
* @param opts.parentMessageId - Optional ID of the previous message in the conversation
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.onConversationResponse - Optional callback which will be invoked every time the partial response is updated with the full conversation response
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
Expand All @@ -96,11 +98,19 @@ export class ChatGPTAPI {
const {
conversationId,
parentMessageId = uuidv4(),
timeoutMs,
onProgress,
onConversationResponse,
abortSignal
onConversationResponse
} = opts

let { abortSignal } = opts

let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}

const accessToken = await this.refreshAccessToken()

const body: types.ConversationJSONBody = {
Expand All @@ -124,14 +134,9 @@ export class ChatGPTAPI {
}

const url = `${this._backendApiBaseUrl}/conversation`

// TODO: What's the best way to differentiate btwn wanting just the response text
// versus wanting the full response message, so you can extract the ID and other
// metadata?
// let fullResponse: types.Message = null
let response = ''

return new Promise((resolve, reject) => {
const responseP = new Promise<string>((resolve, reject) => {
fetchSSE(url, {
method: 'POST',
headers: {
Expand Down Expand Up @@ -164,7 +169,6 @@ export class ChatGPTAPI {
}

response = text
// fullResponse = message

if (onProgress) {
onProgress(text)
Expand All @@ -178,8 +182,56 @@ export class ChatGPTAPI {
}
}).catch(reject)
})

if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}

return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'ChatGPT timed out waiting for response'
})
} else {
return responseP
}
}

/**
   * @returns `true` if the client has a valid access token or `false` if refreshing
* the token fails.
*/
async getIsAuthenticated() {
try {
void (await this.refreshAccessToken())
return true
} catch (err) {
return false
}
}

/**
* Refreshes the client's access token which will succeed only if the session
* is still valid.
*/
async ensureAuth() {
return await this.refreshAccessToken()
}

/**
* Attempts to refresh the current access token using the ChatGPT
* `sessionToken` cookie.
*
* Access tokens will be cached for up to `accessTokenTTL` milliseconds to
* prevent refreshing access tokens too frequently.
*
* @returns A valid access token
* @throws An error if refreshing the access token fails.
*/
async refreshAccessToken(): Promise<string> {
const cachedAccessToken = this._accessTokenCache.get(KEY_ACCESS_TOKEN)
if (cachedAccessToken) {
Expand Down
5 changes: 3 additions & 2 deletions src/fetch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,9 @@

// Use `undici` for node.js 16 and 17
// Use `fetch` for node.js >= 18
// Use `fetch` for browsers
// Use `fetch` for all other environments
// Use `fetch` for all other environments, including browsers
// NOTE: The top-level await is removed in a `postbuild` npm script for the
// browser build
const fetch =
globalThis.fetch ??
((await import('undici')).fetch as unknown as typeof globalThis.fetch)
Expand Down
1 change: 1 addition & 0 deletions src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,7 @@ export type MessageMetadata = any
export type SendMessageOptions = {
conversationId?: string
parentMessageId?: string
timeoutMs?: number
onProgress?: (partialResponse: string) => void
onConversationResponse?: (response: ConversationResponseEvent) => void
abortSignal?: AbortSignal
Expand Down

0 comments on commit 58795f4

Please sign in to comment.