prompt_target_pairs.jsonl
{"prompt":"Write a function that generates a commit message using the GPT-3 model by creating a diff of the current changes and sending the prompt to the GPT-3 model. The function should truncate the diff if it exceeds the maximum character count.","target":" async function generateCommitMsg() {\n console.log('Generating commit message for staged changes...');\n\n let diff = execSync('git diff -U10 --staged').toString();\n let files = execSync('git diff --name-only --cached').toString().trim();\n console.log('files', files);\n if (!files) {\n console.error('No files to commit');\n return;\n }\n\n console.log('tokens', diff.length, MAX_CHAR_COUNT);\n const oldDiff = diff.length;\n\n if (diff.length > MAX_CHAR_COUNT) {\n diff = [\n execSync('git diff --staged --stat').toString(),\n execSync('git diff -U5 --staged').toString(),\n ].join('\\n');\n\n console.warn(\n 'over max char count, reducing diff fidelity',\n 'from:',\n oldDiff.length,\n 'to:',\n diff.length\n );\n }\n\n let prompt = createPrompt(diff);\n\n let commitMsg;\n for await (const msg of getValidJsonResponse(prompt)) {\n commitMsg = msg;\n }\n\n return commitMsg;\n}\n"}
{"prompt":"Write a function that creates a markdown-formatted commit message from a commit message object with \"Title\", \"Description\", and \"Changes\" properties. The \"Changes\" property should be an array of objects, each with a \"filename\" and a \"description\" property.","target":" function createMarkdownCommit(commitMsg = {}) {\n if (!commitMsg.Changes) {\n console.log('commit message is empty', commitMsg);\n }\n const markdownCommitMsg = `# ${commitMsg.Title}\\n\\n${\n commitMsg.Description\n }\\n\\n${commitMsg.Changes.map(\n (change) =>\n `### ${change.filename}\\n\\n${change.description\n .map((desc) => `- ${desc}`)\n .join('\\n')}`\n ).join('\\n\\n')}`;\n\n return markdownCommitMsg;\n}\n"}
{"prompt":"Write a function that executes a git commit command with a provided title and body as messages.","target":" function gitCommit(title, body) {\n // Split the body by newline characters to get an array of strings\n const bodyLines = body.split('\\n');\n\n // Create an array for the git command\n let gitCmdArray = ['git', 'commit', '-m', title];\n\n // Iterate over the bodyLines array and add a \"-m\" followed by each line\n bodyLines.forEach((line) => {\n gitCmdArray.push('-m');\n gitCmdArray.push(line);\n });\n const gitCmd = commandJoin(gitCmdArray);\n console.log('Committing changes...');\n return execSync(gitCmd, { stdio: 'inherit' });\n}\n"}
{"prompt":"Write a function that creates a prompt string for summarizing a series of git commit messages in markdown format.","target":" function createLogMarkdownPrompt(input) {\n return `\nPlease suggest one detailed summary for the series of commit messages.\nThe summary should be composed of a title and a description.\nIn the description, provide an overall summary of the changes reflected in the commits, and list each commit with a brief explanation of its content.\nReturn it as markdown format:\n# [summary title]\n[summary description]\n## Commits\n - [commit message]\n - [explanation]\n - [explanation]\n - [commit message]\n - [explanation]\n - [explanation]\n\n${response.end}\n\nHere are the commit messages: ${input.slice(0, MAX_CHAR_COUNT)}`;\n}\n"}
{"prompt":"Write a function that converts an AI generated summary of commit messages into markdown format.","target":" function createMarkdownSummary(summaryMsg = {}) {\n const markdownSummaryMsg = `# ${summaryMsg.Title}\\n\\n${\n summaryMsg.Description\n }\\n\\n${summaryMsg.Commits.map(\n (commit) =>\n `## ${commit.commit}\\n\\n${commit.explanation\n .map((desc) => `- ${desc}`)\n .join('\\n')}`\n ).join('\\n\\n')}`;\n\n return markdownSummaryMsg;\n}\n"}
{"prompt":"Write a function that executes a git commit command with a provided title and body as messages.","target":" function gitCommit(title, body) {\n // Split the body by newline characters to get an array of strings\n const bodyLines = body.split('\\n');\n\n // Create an array for the git command\n let gitCmdArray = ['git', 'commit', '-m', title];\n\n // Iterate over the bodyLines array and add a \"-m\" followed by each line\n bodyLines.forEach((line) => {\n gitCmdArray.push('-m');\n gitCmdArray.push(line);\n });\n const gitCmd = commandJoin(gitCmdArray);\n console.log('Committing changes...');\n return execSync(gitCmd, { stdio: 'inherit' });\n}\n"}
{"prompt":"Write a function that creates a prompt string for summarizing a series of git commit messages in markdown format.","target":" function createLogMarkdownPrompt(input) {\n return `\nPlease suggest one detailed summary for the series of commit messages.\nThe summary should be composed of a title and a description.\nIn the description, provide an overall summary of the changes reflected in the commits, and list each commit with a brief explanation of its content.\nReturn it as markdown format:\n# [summary title]\n[summary description]\n## Commits\n - [commit message]\n - [explanation]\n - [explanation]\n - [commit message]\n - [explanation]\n - [explanation]\n\n${response.end}\n\nHere are the commit messages: ${input.slice(0, MAX_CHAR_COUNT)}`;\n}\n"}
{"prompt":"Write a function that converts an AI generated summary of commit messages into markdown format.","target":" function createMarkdownSummary(summaryMsg = {}) {\n const markdownSummaryMsg = `# ${summaryMsg.Title}\\n\\n${\n summaryMsg.Description\n }\\n\\n${summaryMsg.Commits.map(\n (commit) =>\n `## ${commit.commit}\\n\\n${commit.explanation\n .map((desc) => `- ${desc}`)\n .join('\\n')}`\n ).join('\\n\\n')}`;\n\n return markdownSummaryMsg;\n}\n"}
{"prompt":"Write a function that sends a completion request to the OpenAI API and returns the response.","target":" async function completion({\n prompt,\n fallback,\n max_tokens,\n temperature = 0,\n model = defaultModel,\n}) {\n const messages = [\n {\n role: ChatCompletionRequestMessageRoleEnum.System,\n content: filterStopwords\n ? removeStopwords((prompt ?? '').split(' ')).join(' ')\n : prompt ?? '',\n },\n ...Array.from(chatHistory),\n ];\n\n let result;\n try {\n result = await openai.createChatCompletion({\n model,\n messages,\n temperature,\n max_tokens: max_tokens ?? 800,\n stop: [response.end],\n });\n } catch (error) {\n console.error('Error in createChatCompletion:', error);\n if (error.response) {\n console.error('HTTP response body:', error.response.data);\n }\n throw error;\n }\n\n if (!result.data.choices[0].message) {\n throw new Error('No text returned from completions endpoint');\n }\n\n const messageContent = result.data.choices[0].message.content;\n\n chatHistory.add({\n role: ChatCompletionRequestMessageRoleEnum.Assistant,\n content: messageContent,\n });\n\n return messageContent;\n}\n"}
{"prompt":"Write a function that sends a completion request to the OpenAI API and returns a stream of text.","target":" async function* completionStream({\n prompt,\n temperature,\n max_tokens,\n model = defaultModel,\n}) {\n const messages = [\n {\n role: ChatCompletionRequestMessageRoleEnum.System,\n content: filterStopwords\n ? removeStopwords((prompt ?? '').split(' ')).join(' ')\n : prompt ?? '',\n },\n ...Array.from(chatHistory),\n ];\n\n let result;\n try {\n result = await openai.createChatCompletion(\n {\n model,\n messages,\n temperature,\n max_tokens: max_tokens ?? 800,\n stream: true,\n },\n {\n responseType: 'stream',\n }\n );\n } catch (error) {\n console.error('Error in createChatCompletion:', error);\n if (error.response) {\n console.error('HTTP response body:', error.response.data);\n }\n throw error;\n }\n\n const stream = result.data;\n let buffer = '';\n const textDecoder = new TextDecoder();\n for await (const chunk of stream) {\n buffer += textDecoder.decode(chunk, { stream: true });\n\n const lines = buffer.split('\\n');\n\n if (buffer.endsWith('\\n')) {\n buffer = '';\n } else {\n buffer = lines.pop() || '';\n }\n\n for (const line of lines) {\n const message = line.trim().split('data: ')[1];\n\n if (message === '[DONE]') {\n break;\n }\n\n if (message) {\n try {\n const data = JSON.parse(message);\n\n if (data.choices[0].delta?.content) {\n yield data.choices[0].delta?.content;\n }\n } catch (error) {\n console.error('Error parsing JSON message:', error);\n }\n }\n }\n }\n\n if (buffer) {\n chatHistory.add({\n role: ChatCompletionRequestMessageRoleEnum.Assistant,\n content: buffer,\n });\n }\n}\n"}
{"prompt":"Write a function that converts an AI generated summary of commit messages into markdown format.","target":" function createMarkdownSummary(summaryMsg = {}) {\n const markdownSummaryMsg = `# ${summaryMsg.Title}\\n\\n${\n summaryMsg.Description\n }\\n\\n${summaryMsg.Commits.map(\n (commit) =>\n `## ${commit.commit}\\n\\n${commit.explanation\n .map((desc) => `- ${desc}`)\n .join('\\n')}`\n ).join('\\n\\n')}`;\n\n return markdownSummaryMsg;\n}\n"}
{"prompt":"Write a function that sends a completion request to the OpenAI API and returns the response.","target":" async function completion({\n prompt,\n fallback,\n max_tokens,\n temperature = 0,\n model = defaultModel,\n}) {\n const messages = [\n {\n role: ChatCompletionRequestMessageRoleEnum.System,\n content: filterStopwords\n ? removeStopwords((prompt ?? '').split(' ')).join(' ')\n : prompt ?? '',\n },\n ...Array.from(chatHistory),\n ];\n\n let result;\n try {\n result = await openai.createChatCompletion({\n model,\n messages,\n temperature,\n max_tokens: max_tokens ?? 800,\n stop: [response.end],\n });\n } catch (error) {\n console.error('Error in createChatCompletion:', error);\n if (error.response) {\n console.error('HTTP response body:', error.response.data);\n }\n throw error;\n }\n\n if (!result.data.choices[0].message) {\n throw new Error('No text returned from completions endpoint');\n }\n\n const messageContent = result.data.choices[0].message.content;\n\n chatHistory.add({\n role: ChatCompletionRequestMessageRoleEnum.Assistant,\n content: messageContent,\n });\n\n return messageContent;\n}\n"}
{"prompt":"Write a function that sends a completion request to the OpenAI API and returns a stream of text.","target":" async function* completionStream({\n prompt,\n temperature,\n max_tokens,\n model = defaultModel,\n}) {\n const messages = [\n {\n role: ChatCompletionRequestMessageRoleEnum.System,\n content: filterStopwords\n ? removeStopwords((prompt ?? '').split(' ')).join(' ')\n : prompt ?? '',\n },\n ...Array.from(chatHistory),\n ];\n\n let result;\n try {\n result = await openai.createChatCompletion(\n {\n model,\n messages,\n temperature,\n max_tokens: max_tokens ?? 800,\n stream: true,\n },\n {\n responseType: 'stream',\n }\n );\n } catch (error) {\n console.error('Error in createChatCompletion:', error);\n if (error.response) {\n console.error('HTTP response body:', error.response.data);\n }\n throw error;\n }\n\n const stream = result.data;\n let buffer = '';\n const textDecoder = new TextDecoder();\n for await (const chunk of stream) {\n buffer += textDecoder.decode(chunk, { stream: true });\n\n const lines = buffer.split('\\n');\n\n if (buffer.endsWith('\\n')) {\n buffer = '';\n } else {\n buffer = lines.pop() || '';\n }\n\n for (const line of lines) {\n const message = line.trim().split('data: ')[1];\n\n if (message === '[DONE]') {\n break;\n }\n\n if (message) {\n try {\n const data = JSON.parse(message);\n\n if (data.choices[0].delta?.content) {\n yield data.choices[0].delta?.content;\n }\n } catch (error) {\n console.error('Error parsing JSON message:', error);\n }\n }\n }\n }\n\n if (buffer) {\n chatHistory.add({\n role: ChatCompletionRequestMessageRoleEnum.Assistant,\n content: buffer,\n });\n }\n}\n"}
{"prompt":"Write a function that sends an embedding request to the OpenAI API and returns the response.","target":" async function embedding({ input, model = 'text-embedding-ada-002' }) {\n const result = await openai.createEmbedding({\n model: 'text-embedding-ada-002', //cant be GPT4\n input,\n });\n\n if (!result.data.data[0].embedding) {\n throw new Error('No embedding returned from the completions endpoint');\n }\n\n return result.data.data.map((d) => d.embedding);\n}\n"}
{"prompt":"Write a function that parses a given parameter from a route and returns a data structure that can be used to generate the parametrized route. Examples: - `[...slug]` -> `{ name: 'slug', repeat: true, optional: true }` - `[foo]` -> `{ name: 'foo', repeat: false, optional: true }` - `bar` -> `{ name: 'bar', repeat: false, optional: false }`","target":" function parseParameter(param: string) {\n const optional = param.startsWith('[') && param.endsWith(']');\n if (optional) {\n param = param.slice(1, -1);\n }\n const repeat = param.startsWith('...');\n if (repeat) {\n param = param.slice(3);\n }\n return { key: param, repeat, optional };\n}\n"}
{"prompt":"Write a function that sends a completion request to the OpenAI API and returns a stream of text.","target":" async function* completionStream({\n prompt,\n temperature,\n max_tokens,\n model = defaultModel,\n}) {\n const messages = [\n {\n role: ChatCompletionRequestMessageRoleEnum.System,\n content: filterStopwords\n ? removeStopwords((prompt ?? '').split(' ')).join(' ')\n : prompt ?? '',\n },\n ...Array.from(chatHistory),\n ];\n\n let result;\n try {\n result = await openai.createChatCompletion(\n {\n model,\n messages,\n temperature,\n max_tokens: max_tokens ?? 800,\n stream: true,\n },\n {\n responseType: 'stream',\n }\n );\n } catch (error) {\n console.error('Error in createChatCompletion:', error);\n if (error.response) {\n console.error('HTTP response body:', error.response.data);\n }\n throw error;\n }\n\n const stream = result.data;\n let buffer = '';\n const textDecoder = new TextDecoder();\n for await (const chunk of stream) {\n buffer += textDecoder.decode(chunk, { stream: true });\n\n const lines = buffer.split('\\n');\n\n if (buffer.endsWith('\\n')) {\n buffer = '';\n } else {\n buffer = lines.pop() || '';\n }\n\n for (const line of lines) {\n const message = line.trim().split('data: ')[1];\n\n if (message === '[DONE]') {\n break;\n }\n\n if (message) {\n try {\n const data = JSON.parse(message);\n\n if (data.choices[0].delta?.content) {\n yield data.choices[0].delta?.content;\n }\n } catch (error) {\n console.error('Error parsing JSON message:', error);\n }\n }\n }\n }\n\n if (buffer) {\n chatHistory.add({\n role: ChatCompletionRequestMessageRoleEnum.Assistant,\n content: buffer,\n });\n }\n}\n"}
{"prompt":"Write a function that sends an embedding request to the OpenAI API and returns the response.","target":" async function embedding({ input, model = 'text-embedding-ada-002' }) {\n const result = await openai.createEmbedding({\n model: 'text-embedding-ada-002', //cant be GPT4\n input,\n });\n\n if (!result.data.data[0].embedding) {\n throw new Error('No embedding returned from the completions endpoint');\n }\n\n return result.data.data.map((d) => d.embedding);\n}\n"}
{"prompt":"Write a function that parses a given parameter from a route and returns a data structure that can be used to generate the parametrized route.","target":" function parseParameter(param: string) {\n const optional = param.startsWith('[') && param.endsWith(']');\n if (optional) {\n param = param.slice(1, -1);\n }\n const repeat = param.startsWith('...');\n if (repeat) {\n param = param.slice(3);\n }\n return { key: param, repeat, optional };\n}\n"}
{"prompt":"Write a function that converts a browser pathname to a Next.js route for proper handling of dynamic routes.","target":" export function pathnameToRoute(\n cleanPathname: string,\n routes: string[]\n): string | undefined {\n if (routes.includes(cleanPathname)) {\n return cleanPathname;\n }\n\n for (const route of routes) {\n if (isDynamicRoute(route) && getRouteRegex(route).re.test(cleanPathname)) {\n return route;\n }\n }\n\n return undefined;\n}\n"}
{"prompt":"Write a function that sorts provided pages in the correct Next.js order for proper handling of dynamic routes.","target":" export function sortNextPages(pages: string[]): string[] {\n const root = new UrlNode();\n pages.forEach((pageRoute) => root.insert(pageRoute));\n // Smoosh will then sort those sublevels up to the point where you get the correct route definition priority\n return root.smoosh();\n}\n"}
{"prompt":"Write a function that parses a given parameter from a route and returns a data structure that can be used to generate the parametrized route.","target":" function parseParameter(param: string) {\n const optional = param.startsWith('[') && param.endsWith(']');\n if (optional) {\n param = param.slice(1, -1);\n }\n const repeat = param.startsWith('...');\n if (repeat) {\n param = param.slice(3);\n }\n return { key: param, repeat, optional };\n}\n"}
{"prompt":"Write a function that converts a browser pathname to a Next.js route for proper handling of dynamic routes.","target":" export function pathnameToRoute(\n cleanPathname: string,\n routes: string[]\n): string | undefined {\n if (routes.includes(cleanPathname)) {\n return cleanPathname;\n }\n\n for (const route of routes) {\n if (isDynamicRoute(route) && getRouteRegex(route).re.test(cleanPathname)) {\n return route;\n }\n }\n\n return undefined;\n}\n"}
{"prompt":"Write a function that sorts provided pages in the correct Next.js order for proper handling of dynamic routes.","target":" export function sortNextPages(pages: string[]): string[] {\n const root = new UrlNode();\n pages.forEach((pageRoute) => root.insert(pageRoute));\n // Smoosh will then sort those sublevels up to the point where you get the correct route definition priority\n return root.smoosh();\n}\n"}
{"prompt":"Write a React hook that provides a convenient way to work with Module Federation runtime changes.","target":" export function useMFClient(opts: MFClientHookOptions): MFClient {\n const MFClient: MFClient = isBrowser\n ? (window as any).mf_client\n : /* TODO: inject here SSR version of MFClient if it will be needed in future */ ({} as any);\n\n const innerState = React.useRef<InnerState>({\n remote: undefined,\n });\n\n React.useEffect(() => {\n // Step 1: Define handlers and helpers\n const processRemoteChange = (remote: RemoteContainer | undefined) => {\n if (innerState.current.remote !== remote) {\n innerState.current.remote = remote;\n if (opts?.onChangeRemote) {\n opts.onChangeRemote(remote, MFClient);\n }\n }\n };\n\n const handleRouterChange = (pathname: string) => {\n if (MFClient.isFederatedPathname(pathname)) {\n const remote = MFClient.remotePages.routeToRemote(pathname);\n processRemoteChange(remote);\n } else {\n processRemoteChange(undefined);\n }\n };\n\n // Step 2: run bootstrap logic\n const initialRemote = MFClient.isFederatedPathname(window.location.pathname)\n ? MFClient.remotePages.routeToRemote(window.location.pathname)\n : undefined;\n\n if (initialRemote) {\n // important for first load to fire `onChangeRemote` with different remote\n // because in innerState by default we assume that used local application\n processRemoteChange(initialRemote);\n }\n\n // Step 3: Subscribe on events\n singletonRouter.events.on('routeChangeStart', handleRouterChange);\n return () => {\n singletonRouter.events.off('routeChangeStart', handleRouterChange);\n };\n }, []);\n\n return MFClient;\n}\n"}
{"prompt":"Write a React hook that provides access to a RemoteContainer in Module Federation.","target":" export function useMFRemote(global: string): UseMFRemoteResult {\n let remote: RemoteContainer;\n\n if (isBrowser) {\n // on client (we get instances from global variable because webpack breaks Singletons)\n const MFClient: MFClient = (window as any).mf_client;\n remote = MFClient.remotes[global] || MFClient.registerRemote(global);\n } else {\n // on server side\n remote = RemoteContainer.createSingleton(global);\n }\n\n const [loaded, setLoaded] = React.useState(remote.isLoaded());\n const [error, setError] = React.useState(remote.error);\n\n React.useEffect(() => {\n const handleLoadComplete = () => {\n setLoaded(true);\n };\n const handleLoadError = (e: Error) => {\n setError(e);\n };\n\n if (!loaded && remote.isLoaded()) {\n handleLoadComplete();\n }\n\n remote.events.on('loadComplete', handleLoadComplete);\n remote.events.on('loadError', handleLoadError);\n return () => {\n remote.events.off('loadComplete', handleLoadComplete);\n remote.events.off('loadError', handleLoadError);\n };\n }, [remote]);\n\n return {\n remote,\n loaded,\n error,\n };\n}\n"}