From 16e094b5e50e4b61e5d1f6519282a12672fc71a3 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 17 Oct 2024 16:52:53 +0000 Subject: [PATCH 001/246] feat(api): add gpt-4o-audio-preview model for chat completions (#1135) This enables audio inputs and outputs. https://platform.openai.com/docs/guides/audio --- .stats.yml | 2 +- api.md | 4 + src/index.ts | 4 + src/lib/AbstractChatCompletionRunner.ts | 4 +- src/resources/beta/assistants.ts | 10 ++ src/resources/chat/chat.ts | 7 + src/resources/chat/completions.ts | 153 ++++++++++++++++++- src/resources/chat/index.ts | 4 + tests/api-resources/chat/completions.test.ts | 2 + 9 files changed, 183 insertions(+), 7 deletions(-) diff --git a/.stats.yml b/.stats.yml index 68789976b..984e8a8d5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml diff --git a/api.md b/api.md index 71027acfd..da60f65bd 100644 --- a/api.md +++ b/api.md @@ -33,9 +33,12 @@ Types: - ChatCompletion - ChatCompletionAssistantMessageParam +- ChatCompletionAudio +- ChatCompletionAudioParam - ChatCompletionChunk - ChatCompletionContentPart - ChatCompletionContentPartImage +- ChatCompletionContentPartInputAudio - ChatCompletionContentPartRefusal - ChatCompletionContentPartText - ChatCompletionFunctionCallOption @@ -43,6 +46,7 @@ Types: - ChatCompletionMessage - ChatCompletionMessageParam - ChatCompletionMessageToolCall +- ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionRole - ChatCompletionStreamOptions diff --git a/src/index.ts b/src/index.ts index d3e1d2a78..56108223a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -250,9 +250,12 @@ export namespace OpenAI { export import ChatModel = API.ChatModel; 
export import ChatCompletion = API.ChatCompletion; export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = API.ChatCompletionAudio; + export import ChatCompletionAudioParam = API.ChatCompletionAudioParam; export import ChatCompletionChunk = API.ChatCompletionChunk; export import ChatCompletionContentPart = API.ChatCompletionContentPart; export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = API.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; @@ -260,6 +263,7 @@ export namespace OpenAI { export import ChatCompletionMessage = API.ChatCompletionMessage; export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; + export import ChatCompletionModality = API.ChatCompletionModality; export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; export import ChatCompletionRole = API.ChatCompletionRole; export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 39ee4e993..e943a4e4f 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -105,7 +105,9 @@ export class AbstractChatCompletionRunner< const message = this.messages[i]; if (isAssistantMessage(message)) { const { function_call, ...rest } = message; - const ret: ChatCompletionMessage = { + + // TODO: support audio here + const ret: Omit = { ...rest, content: (message as ChatCompletionMessage).content ?? 
null, refusal: (message as ChatCompletionMessage).refusal ?? null, diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 410d520b0..aa7362297 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -298,6 +298,11 @@ export namespace AssistantStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } /** @@ -1084,6 +1089,11 @@ export interface ThreadStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } export interface AssistantCreateParams { diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 5bc7de955..43ef5662c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -16,7 +16,10 @@ export type ChatModel = | 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' + | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -45,9 +48,12 @@ export namespace Chat { export import Completions = CompletionsAPI.Completions; export import ChatCompletion = CompletionsAPI.ChatCompletion; export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = CompletionsAPI.ChatCompletionAudio; + export import ChatCompletionAudioParam = CompletionsAPI.ChatCompletionAudioParam; export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = CompletionsAPI.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = 
CompletionsAPI.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; @@ -55,6 +61,7 @@ export namespace Chat { export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage; export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; + export import ChatCompletionModality = CompletionsAPI.ChatCompletionModality; export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 27aebdc4c..97174ec1b 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -11,7 +11,10 @@ import { Stream } from '../../streaming'; export class Completions extends APIResource { /** - * Creates a model response for the given chat conversation. + * Creates a model response for the given chat conversation. Learn more in the + * [text generation](https://platform.openai.com/docs/guides/text-generation), + * [vision](https://platform.openai.com/docs/guides/vision), and + * [audio](https://platform.openai.com/docs/guides/audio) guides. */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -138,6 +141,12 @@ export interface ChatCompletionAssistantMessageParam { */ role: 'assistant'; + /** + * Data about a previous audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAssistantMessageParam.Audio | null; + /** * The contents of the assistant message. Required unless `tool_calls` or * `function_call` is specified. 
@@ -168,6 +177,17 @@ export interface ChatCompletionAssistantMessageParam { } export namespace ChatCompletionAssistantMessageParam { + /** + * Data about a previous audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + export interface Audio { + /** + * Unique identifier for a previous audio response from the model. + */ + id: string; + } + /** * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. @@ -188,6 +208,54 @@ export namespace ChatCompletionAssistantMessageParam { } } +/** + * If the audio output modality is requested, this object contains data about the + * audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionAudio { + /** + * Unique identifier for this audio response. + */ + id: string; + + /** + * Base64 encoded audio bytes generated by the model, in the format specified in + * the request. + */ + data: string; + + /** + * The Unix timestamp (in seconds) for when this audio response will no longer be + * accessible on the server for use in multi-turn conversations. + */ + expires_at: number; + + /** + * Transcript of the audio generated by the model. + */ + transcript: string; +} + +/** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionAudioParam { + /** + * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + * or `pcm16`. + */ + format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; + + /** + * Specifies the voice type. Supported voices are `alloy`, `echo`, `fable`, `onyx`, + * `nova`, and `shimmer`. 
+ */ + voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; +} + /** * Represents a streamed chunk of a chat completion response returned by model, * based on the provided input. @@ -371,8 +439,18 @@ export namespace ChatCompletionChunk { } } -export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage; +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartInputAudio; +/** + * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + */ export interface ChatCompletionContentPartImage { image_url: ChatCompletionContentPartImage.ImageURL; @@ -397,6 +475,32 @@ export namespace ChatCompletionContentPartImage { } } +/** + * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionContentPartInputAudio { + input_audio: ChatCompletionContentPartInputAudio.InputAudio; + + /** + * The type of the content part. Always `input_audio`. + */ + type: 'input_audio'; +} + +export namespace ChatCompletionContentPartInputAudio { + export interface InputAudio { + /** + * Base64 encoded audio data. + */ + data: string; + + /** + * The format of the encoded audio data. Currently supports "wav" and "mp3". + */ + format: 'wav' | 'mp3'; + } +} + export interface ChatCompletionContentPartRefusal { /** * The refusal message generated by the model. @@ -409,6 +513,10 @@ export interface ChatCompletionContentPartRefusal { type: 'refusal'; } +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ export interface ChatCompletionContentPartText { /** * The text content. 
@@ -471,6 +579,13 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * If the audio output modality is requested, this object contains data about the + * audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudio | null; + /** * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. @@ -548,6 +663,8 @@ export namespace ChatCompletionMessageToolCall { } } +export type ChatCompletionModality = 'text' | 'audio'; + /** * Specifies a tool the model should use. Use to force the model to call a specific * function. @@ -743,6 +860,13 @@ export interface ChatCompletionCreateParamsBase { */ model: (string & {}) | ChatAPI.ChatModel; + /** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudioParam | null; + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to @@ -812,10 +936,24 @@ export interface ChatCompletionCreateParamsBase { /** * Developer-defined tags and values used for filtering completions in the - * [dashboard](https://platform.openai.com/completions). + * [dashboard](https://platform.openai.com/chat-completions). */ metadata?: Record | null; + /** + * Output types that you would like the model to generate for this request. Most + * models are capable of generating text, which is the default: + * + * `["text"]` + * + * The `gpt-4o-audio-preview` model can also be used to + * [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that + * this model generate both text and audio responses, you can use: + * + * `["text", "audio"]` + */ + modalities?: Array | null; + /** * How many chat completion choices to generate for each input message. Note that * you will be charged based on the number of generated tokens across all of the @@ -900,8 +1038,9 @@ export interface ChatCompletionCreateParamsBase { stop?: string | null | Array; /** - * Whether or not to store the output of this completion request for traffic - * logging in the [dashboard](https://platform.openai.com/completions). + * Whether or not to store the output of this chat completion request for use in + * our [model distillation](https://platform.openai.com/docs/guides/distillation) + * or [evals](https://platform.openai.com/docs/guides/evals) products. */ store?: boolean | null; @@ -1049,9 +1188,12 @@ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreamin export namespace Completions { export import ChatCompletion = ChatCompletionsAPI.ChatCompletion; export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = ChatCompletionsAPI.ChatCompletionAudio; + export import ChatCompletionAudioParam = ChatCompletionsAPI.ChatCompletionAudioParam; export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = ChatCompletionsAPI.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; @@ -1059,6 +1201,7 
@@ export namespace Completions { export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage; export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; + export import ChatCompletionModality = ChatCompletionsAPI.ChatCompletionModality; export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 748770948..22803e819 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -3,9 +3,12 @@ export { ChatCompletion, ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, @@ -13,6 +16,7 @@ export { ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, + ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionStreamOptions, diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 4f015b47e..77d4a251c 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -27,6 +27,7 @@ describe('resource completions', () => { const response = await client.chat.completions.create({ messages: [{ content: 'string', role: 'system', name: 'name' }], model: 'gpt-4o', + audio: { format: 'wav', voice: 'alloy' }, frequency_penalty: -2, function_call: 'none', functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], @@ 
-35,6 +36,7 @@ describe('resource completions', () => { max_completion_tokens: 0, max_tokens: 0, metadata: { foo: 'string' }, + modalities: ['text', 'audio'], n: 1, parallel_tool_calls: true, presence_penalty: -2, From 6ae19ce08eea4f6b3c4865861a6cce09d403cac8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 17:19:37 +0000 Subject: [PATCH 002/246] release: 4.68.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8c54ecee..91b39801d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.3" + ".": "4.68.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 710d09ca9..2fcd3be4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.0 (2024-10-17) + +Full Changelog: [v4.67.3...v4.68.0](https://github.com/openai/openai-node/compare/v4.67.3...v4.68.0) + +### Features + +* **api:** add gpt-4o-audio-preview model for chat completions ([#1135](https://github.com/openai/openai-node/issues/1135)) ([17a623f](https://github.com/openai/openai-node/commit/17a623f70050bca4538ad2939055cd9d9b165f89)) + ## 4.67.3 (2024-10-08) Full Changelog: [v4.67.2...v4.67.3](https://github.com/openai/openai-node/compare/v4.67.2...v4.67.3) diff --git a/README.md b/README.md index 407933634..bbfc821d2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.67.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.0/mod.ts'; ``` diff --git a/package.json b/package.json index e20c1b9c1..807c79098 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.3", + 
"version": "4.68.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f59404dbc..5e813aeb2 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 174c31111..12aaa52bb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.3'; // x-release-please-version +export const VERSION = '4.68.0'; // x-release-please-version From 02fd7699130e2ff442aca45622b064bc4eda6fab Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:12:40 +0000 Subject: [PATCH 003/246] fix(client): respect x-stainless-retry-count default headers (#1138) --- src/core.ts | 10 +++++++--- tests/index.test.ts | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index d78e9e926..9d90178ab 100644 --- a/src/core.ts +++ b/src/core.ts @@ -386,9 +386,13 @@ export abstract class APIClient { delete reqHeaders['content-type']; } - // Don't set the retry count header if it was already set or removed by the caller. We check `headers`, - // which can contain nulls, instead of `reqHeaders` to account for the removal case. - if (getHeader(headers, 'x-stainless-retry-count') === undefined) { + // Don't set the retry count header if it was already set or removed through default headers or by the + // caller. We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to + // account for the removal case. 
+ if ( + getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined && + getHeader(headers, 'x-stainless-retry-count') === undefined + ) { reqHeaders['x-stainless-retry-count'] = String(retryCount); } diff --git a/tests/index.test.ts b/tests/index.test.ts index b55ec5f67..f39571121 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -295,6 +295,39 @@ describe('retries', () => { expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); }); + test('omit retry count header by default', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + const client = new OpenAI({ + apiKey: 'My API Key', + fetch: testFetch, + maxRetries: 4, + defaultHeaders: { 'X-Stainless-Retry-Count': null }, + }); + + expect( + await client.request({ + path: '/foo', + method: 'get', + }), + ).toEqual({ a: 1 }); + + expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); + }); + test('overwrite retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; From d08bf1a8fa779e6a9349d92ddf65530dd84e686d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:13:06 +0000 Subject: [PATCH 004/246] release: 4.68.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 91b39801d..64f1d21d4 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.0" + ".": "4.68.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fcd3be4d..9a2102f46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.1 (2024-10-18) + +Full Changelog: [v4.68.0...v4.68.1](https://github.com/openai/openai-node/compare/v4.68.0...v4.68.1) + +### Bug Fixes + +* **client:** respect x-stainless-retry-count default headers ([#1138](https://github.com/openai/openai-node/issues/1138)) ([266717b](https://github.com/openai/openai-node/commit/266717b3301828c7df735064a380a055576183bc)) + ## 4.68.0 (2024-10-17) Full Changelog: [v4.67.3...v4.68.0](https://github.com/openai/openai-node/compare/v4.67.3...v4.68.0) diff --git a/README.md b/README.md index bbfc821d2..d4b838897 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.1/mod.ts'; ``` diff --git a/package.json b/package.json index 807c79098..538163b0f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.0", + "version": "4.68.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 5e813aeb2..b7459b609 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 12aaa52bb..dcff7c8bd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.0'; // 
x-release-please-version +export const VERSION = '4.68.1'; // x-release-please-version From 9b27b22f83756c91c9277ce8334da2120b6afe90 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:23:54 +0000 Subject: [PATCH 005/246] chore(internal): update spec version (#1141) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 984e8a8d5..e1a430e50 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml From ab7770115e88ff1274cf8863afddd6b58f8f158f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:24:22 +0000 Subject: [PATCH 006/246] release: 4.68.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 64f1d21d4..7de9a93f1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.1" + ".": "4.68.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a2102f46..93cf66d4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.2 (2024-10-22) + +Full Changelog: [v4.68.1...v4.68.2](https://github.com/openai/openai-node/compare/v4.68.1...v4.68.2) + +### Chores + +* **internal:** update spec version ([#1141](https://github.com/openai/openai-node/issues/1141)) 
([2ccb3e3](https://github.com/openai/openai-node/commit/2ccb3e357aa2f3eb0fa32c619d8336c3b94cc882)) + ## 4.68.1 (2024-10-18) Full Changelog: [v4.68.0...v4.68.1](https://github.com/openai/openai-node/compare/v4.68.0...v4.68.1) diff --git a/README.md b/README.md index d4b838897..5011b82a1 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.2/mod.ts'; ``` diff --git a/package.json b/package.json index 538163b0f..0eaebee91 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.1", + "version": "4.68.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index b7459b609..c2276e5ea 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index dcff7c8bd..bb7f3f7bd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.1'; // x-release-please-version +export const VERSION = '4.68.2'; // x-release-please-version From 58a645d572572a3de2688a1fd8511f3edab97866 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:11:28 +0000 Subject: [PATCH 007/246] chore(internal): bumps eslint and related dependencies (#1143) --- yarn.lock | 180 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 92 insertions(+), 88 deletions(-) diff --git a/yarn.lock b/yarn.lock index 
5a01e39e3..91b22b941 100644 --- a/yarn.lock +++ b/yarn.lock @@ -322,9 +322,9 @@ eslint-visitor-keys "^3.3.0" "@eslint-community/regexpp@^4.5.1": - version "4.9.0" - resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.9.0.tgz#7ccb5f58703fa61ffdcbf39e2c604a109e781162" - integrity sha512-zJmuCWj2VLBt4c25CfBIbMZLGLyhkvs7LznyVX5HfpzeocThgIj5XQK4L+g3U36mMcx8bPMhGyPpwCATamC4jQ== + version "4.11.1" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.1.tgz#a547badfc719eb3e5f4b556325e542fbe9d7a18f" + integrity sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q== "@eslint-community/regexpp@^4.6.1": version "4.6.2" @@ -857,9 +857,9 @@ pretty-format "^29.0.0" "@types/json-schema@^7.0.12": - version "7.0.13" - resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.13.tgz#02c24f4363176d2d18fc8b70b9f3c54aba178a85" - integrity sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ== + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/node-fetch@^2.6.4": version "2.6.4" @@ -882,9 +882,9 @@ integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== "@types/semver@^7.5.0": - version "7.5.3" - resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.3.tgz#9a726e116beb26c24f1ccd6850201e1246122e04" - integrity sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw== + version "7.5.8" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity 
sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== "@types/stack-utils@^2.0.0": version "2.0.3" @@ -904,15 +904,15 @@ "@types/yargs-parser" "*" "@typescript-eslint/eslint-plugin@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.3.tgz#d98046e9f7102d49a93d944d413c6055c47fafd7" - integrity sha512-vntq452UHNltxsaaN+L9WyuMch8bMd9CqJ3zhzTPXXidwbf5mqqKCVXEuvRZUqLJSTLeWE65lQwyXsRGnXkCTA== + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz#30830c1ca81fd5f3c2714e524c4303e0194f9cd3" + integrity sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA== dependencies: "@eslint-community/regexpp" "^4.5.1" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/type-utils" "6.7.3" - "@typescript-eslint/utils" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/type-utils" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" graphemer "^1.4.0" ignore "^5.2.4" @@ -921,71 +921,72 @@ ts-api-utils "^1.0.1" "@typescript-eslint/parser@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.7.3.tgz#aaf40092a32877439e5957e18f2d6a91c82cc2fd" - integrity sha512-TlutE+iep2o7R8Lf+yoer3zU6/0EAUc8QIBB3GYBc1KGz4c4TRm83xwXUZVPlZ6YCLss4r77jbu6j3sendJoiQ== - dependencies: - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.21.0.tgz#af8fcf66feee2edc86bc5d1cf45e33b0630bf35b" + integrity 
sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ== + dependencies: + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.7.3.tgz#07e5709c9bdae3eaf216947433ef97b3b8b7d755" - integrity sha512-wOlo0QnEou9cHO2TdkJmzF7DFGvAKEnB82PuPNHpT8ZKKaZu6Bm63ugOTn9fXNJtvuDPanBc78lGUGGytJoVzQ== +"@typescript-eslint/scope-manager@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz#ea8a9bfc8f1504a6ac5d59a6df308d3a0630a2b1" + integrity sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg== dependencies: - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" -"@typescript-eslint/type-utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.7.3.tgz#c2c165c135dda68a5e70074ade183f5ad68f3400" - integrity sha512-Fc68K0aTDrKIBvLnKTZ5Pf3MXK495YErrbHb1R6aTpfK5OdSFj0rVN7ib6Tx6ePrZ2gsjLqr0s98NG7l96KSQw== +"@typescript-eslint/type-utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz#6473281cfed4dacabe8004e8521cee0bd9d4c01e" + integrity sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag== dependencies: - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/utils" "6.7.3" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/utils" "6.21.0" debug "^4.3.4" ts-api-utils "^1.0.1" 
-"@typescript-eslint/types@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.7.3.tgz#0402b5628a63f24f2dc9d4a678e9a92cc50ea3e9" - integrity sha512-4g+de6roB2NFcfkZb439tigpAMnvEIg3rIjWQ+EM7IBaYt/CdJt6em9BJ4h4UpdgaBWdmx2iWsafHTrqmgIPNw== +"@typescript-eslint/types@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.21.0.tgz#205724c5123a8fef7ecd195075fa6e85bac3436d" + integrity sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg== -"@typescript-eslint/typescript-estree@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.3.tgz#ec5bb7ab4d3566818abaf0e4a8fa1958561b7279" - integrity sha512-YLQ3tJoS4VxLFYHTw21oe1/vIZPRqAO91z6Uv0Ss2BKm/Ag7/RVQBcXTGcXhgJMdA4U+HrKuY5gWlJlvoaKZ5g== +"@typescript-eslint/typescript-estree@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz#c47ae7901db3b8bddc3ecd73daff2d0895688c46" + integrity sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ== dependencies: - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" globby "^11.1.0" is-glob "^4.0.3" + minimatch "9.0.3" semver "^7.5.4" ts-api-utils "^1.0.1" -"@typescript-eslint/utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.7.3.tgz#96c655816c373135b07282d67407cb577f62e143" - integrity sha512-vzLkVder21GpWRrmSR9JxGZ5+ibIUSudXlW52qeKpzUEQhRSmyZiVDDj3crAth7+5tmN1ulvgKaCU2f/bPRCzg== +"@typescript-eslint/utils@6.21.0": + version "6.21.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.21.0.tgz#4714e7a6b39e773c1c8e97ec587f520840cd8134" + integrity sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ== dependencies: "@eslint-community/eslint-utils" "^4.4.0" "@types/json-schema" "^7.0.12" "@types/semver" "^7.5.0" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" semver "^7.5.4" -"@typescript-eslint/visitor-keys@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.3.tgz#83809631ca12909bd2083558d2f93f5747deebb2" - integrity sha512-HEVXkU9IB+nk9o63CeICMHxFWbHWr3E1mpilIQBe9+7L/lH97rleFLVtYsfnWB+JVMaiFnEaxvknvmIzX+CqVg== +"@typescript-eslint/visitor-keys@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz#87a99d077aa507e20e238b11d56cc26ade45fe47" + integrity sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A== dependencies: - "@typescript-eslint/types" "6.7.3" + "@typescript-eslint/types" "6.21.0" eslint-visitor-keys "^3.4.1" abort-controller@^3.0.0: @@ -1392,13 +1393,20 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" -debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" +debug@^4.3.4: + version "4.3.7" + resolved 
"https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + dedent@^1.0.0: version "1.5.1" resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" @@ -1546,12 +1554,7 @@ eslint-scope@^7.2.2: esrecurse "^4.3.0" estraverse "^5.2.0" -eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1: - version "3.4.2" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.2.tgz#8c2095440eca8c933bedcadf16fefa44dbe9ba5f" - integrity sha512-8drBzUEyZ2llkpCA67iYrgEssKDUu68V8ChqqOfFupIaG/LCVPUT+CoGJpT77zJprs4T/W7p07LP7zAIMuweVw== - -eslint-visitor-keys@^3.4.3: +eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3: version "3.4.3" resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== @@ -1716,18 +1719,7 @@ fast-glob@^3.2.12: merge2 "^1.3.0" micromatch "^4.0.4" -fast-glob@^3.2.9: - version "3.3.1" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" - integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg== - dependencies: - "@nodelib/fs.stat" "^2.0.2" - "@nodelib/fs.walk" "^1.2.3" - glob-parent "^5.1.2" - merge2 "^1.3.0" - micromatch "^4.0.4" - -fast-glob@^3.3.0: +fast-glob@^3.2.9, fast-glob@^3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== @@ -1749,9 +1741,9 @@ 
fast-levenshtein@^2.0.6: integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== fastq@^1.6.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.15.0.tgz#d04d07c6a2a68fe4599fea8d2e103a937fae6b3a" - integrity sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw== + version "1.17.1" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" + integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== dependencies: reusify "^1.0.4" @@ -1974,9 +1966,9 @@ iconv-lite@^0.6.3: safer-buffer ">= 2.1.2 < 3.0.0" ignore@^5.2.0, ignore@^5.2.4: - version "5.2.4" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" - integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== + version "5.3.2" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" + integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== import-fresh@^3.2.1: version "3.3.0" @@ -2681,6 +2673,13 @@ mimic-fn@^4.0.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc" integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw== +minimatch@9.0.3: + version "9.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" + integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg== + dependencies: + brace-expansion "^2.0.1" + minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" resolved 
"https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" @@ -2710,7 +2709,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.0.0: +ms@^2.0.0, ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -3075,13 +3074,18 @@ semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.5.3, semver@^7.5.4: +semver@^7.5.3: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== dependencies: lru-cache "^6.0.0" +semver@^7.5.4: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -3278,9 +3282,9 @@ tr46@~0.0.3: integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= ts-api-utils@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.0.3.tgz#f12c1c781d04427313dbac808f453f050e54a331" - integrity 
sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg== + version "1.3.0" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" + integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== ts-jest@^29.1.0: version "29.1.1" From c239b5cf7723e825250a26cabefeb27aa398be23 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 05:07:12 +0000 Subject: [PATCH 008/246] release: 4.68.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7de9a93f1..eafafb2cf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.2" + ".": "4.68.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 93cf66d4e..604e5183c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.3 (2024-10-23) + +Full Changelog: [v4.68.2...v4.68.3](https://github.com/openai/openai-node/compare/v4.68.2...v4.68.3) + +### Chores + +* **internal:** bumps eslint and related dependencies ([#1143](https://github.com/openai/openai-node/issues/1143)) ([2643f42](https://github.com/openai/openai-node/commit/2643f42a36208c36daf23470ffcd227a891284eb)) + ## 4.68.2 (2024-10-22) Full Changelog: [v4.68.1...v4.68.2](https://github.com/openai/openai-node/compare/v4.68.1...v4.68.2) diff --git a/README.md b/README.md index 5011b82a1..16d0450bb 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.3/mod.ts'; ``` diff --git 
a/package.json b/package.json index 0eaebee91..4fc07b525 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.2", + "version": "4.68.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index c2276e5ea..fa0fd26ea 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index bb7f3f7bd..2657a62ac 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.2'; // x-release-please-version +export const VERSION = '4.68.3'; // x-release-please-version From 748d77154f570b705c198ccd802a7dc0863690d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:56:39 +0000 Subject: [PATCH 009/246] chore(internal): update spec version (#1146) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index e1a430e50..0b0872556 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml From 813cb4413d7b03bddf6885df7fd4c5928e2ec49f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:57:05 +0000 Subject: [PATCH 010/246] 
release: 4.68.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index eafafb2cf..b32797c27 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.3" + ".": "4.68.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 604e5183c..130b287c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.4 (2024-10-23) + +Full Changelog: [v4.68.3...v4.68.4](https://github.com/openai/openai-node/compare/v4.68.3...v4.68.4) + +### Chores + +* **internal:** update spec version ([#1146](https://github.com/openai/openai-node/issues/1146)) ([0165a8d](https://github.com/openai/openai-node/commit/0165a8d79340ede49557e05fd00d6fff9d69d930)) + ## 4.68.3 (2024-10-23) Full Changelog: [v4.68.2...v4.68.3](https://github.com/openai/openai-node/compare/v4.68.2...v4.68.3) diff --git a/README.md b/README.md index 16d0450bb..3bf20a026 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.4/mod.ts'; ``` diff --git a/package.json b/package.json index 4fc07b525..ce87796d1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.3", + "version": "4.68.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index fa0fd26ea..6a67bcdde 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from 
"https://deno.land/x/openai@v4.68.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 2657a62ac..5c2c17eaf 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.3'; // x-release-please-version +export const VERSION = '4.68.4'; // x-release-please-version From d4966566cb2d804b9892986fe4871eb051a416f0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:46:33 +0000 Subject: [PATCH 011/246] docs(readme): minor typo fixes (#1154) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3bf20a026..9aabd058e 100644 --- a/README.md +++ b/README.md @@ -444,7 +444,7 @@ Note that requests which time out will be [retried twice by default](#retries). ## Auto-pagination List methods in the OpenAI API are paginated. 
-You can use `for await … of` syntax to iterate through items across all pages: +You can use the `for await … of` syntax to iterate through items across all pages: ```ts async function fetchAllFineTuningJobs(params) { @@ -457,7 +457,7 @@ async function fetchAllFineTuningJobs(params) { } ``` -Alternatively, you can make request a single page at a time: +Alternatively, you can request a single page at a time: ```ts let page = await client.fineTuning.jobs.list({ limit: 20 }); From 8cafc09f3f4795d9a904f63b067d4f05292dab74 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 20:33:10 +0000 Subject: [PATCH 012/246] fix(internal): support pnpm git installs (#1156) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index ce87796d1..9f9b3ee86 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,7 @@ "license": "Apache-2.0", "packageManager": "yarn@1.22.22", "files": [ - "*" + "**/*" ], "private": false, "scripts": { From b8e5d396b1524aaeef31c408d79fb30314e50577 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:11:29 +0000 Subject: [PATCH 013/246] feat(api): add new, expressive voices for Realtime and Audio in Chat Completions (#1157) https://platform.openai.com/docs/changelog --- .stats.yml | 2 +- src/resources/chat/completions.ts | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0b0872556..39413df44 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml diff --git 
a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 97174ec1b..d439e9a25 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -250,10 +250,10 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * Specifies the voice type. Supported voices are `alloy`, `echo`, `fable`, `onyx`, - * `nova`, and `shimmer`. + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. */ - voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; + voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** @@ -308,7 +308,7 @@ export interface ChatCompletionChunk { * contains a null value except for the last chunk which contains the token usage * statistics for the entire request. */ - usage?: CompletionsAPI.CompletionUsage; + usage?: CompletionsAPI.CompletionUsage | null; } export namespace ChatCompletionChunk { From 622c80aaa17c486cbd16cf620b3bee6b73650ba4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:11:59 +0000 Subject: [PATCH 014/246] release: 4.69.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b32797c27..65aac9575 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.4" + ".": "4.69.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 130b287c2..b3b52aaa3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.69.0 (2024-10-30) + +Full Changelog: [v4.68.4...v4.69.0](https://github.com/openai/openai-node/compare/v4.68.4...v4.69.0) + +### Features 
+ +* **api:** add new, expressive voices for Realtime and Audio in Chat Completions ([#1157](https://github.com/openai/openai-node/issues/1157)) ([12e501c](https://github.com/openai/openai-node/commit/12e501c8a215a2af29b9b8fceedc5935b6f2feef)) + + +### Bug Fixes + +* **internal:** support pnpm git installs ([#1156](https://github.com/openai/openai-node/issues/1156)) ([b744c5b](https://github.com/openai/openai-node/commit/b744c5b609533e9a6694d6cae0425fe9cd37e26c)) + + +### Documentation + +* **readme:** minor typo fixes ([#1154](https://github.com/openai/openai-node/issues/1154)) ([c6c9f9a](https://github.com/openai/openai-node/commit/c6c9f9aaf75f643016ad73574a7e24a228b5c60f)) + ## 4.68.4 (2024-10-23) Full Changelog: [v4.68.3...v4.68.4](https://github.com/openai/openai-node/compare/v4.68.3...v4.68.4) diff --git a/README.md b/README.md index 9aabd058e..776ea4049 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.69.0/mod.ts'; ``` diff --git a/package.json b/package.json index 9f9b3ee86..9e32feabb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.4", + "version": "4.69.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 6a67bcdde..be17942df 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.69.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 5c2c17eaf..be250f2d6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ 
-export const VERSION = '4.68.4'; // x-release-please-version +export const VERSION = '4.69.0'; // x-release-please-version From 6421d69314e89cde4a85d2a70f1dae4cf570d0f7 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 04:32:16 +0000 Subject: [PATCH 015/246] refactor: use type imports for type-only imports (#1159) --- src/index.ts | 377 ++++++++++++------ src/lib/AssistantStream.ts | 3 +- src/resources/audio/audio.ts | 70 ++-- src/resources/audio/index.ts | 24 +- src/resources/audio/speech.ts | 6 +- src/resources/audio/transcriptions.ts | 21 +- src/resources/audio/translations.ts | 17 +- src/resources/batches.ts | 18 +- src/resources/beta/assistants.ts | 37 +- src/resources/beta/beta.ts | 158 +++++--- src/resources/beta/index.ts | 82 ++-- src/resources/beta/threads/index.ts | 126 +++--- src/resources/beta/threads/messages.ts | 71 ++-- src/resources/beta/threads/runs/index.ts | 72 ++-- src/resources/beta/threads/runs/runs.ts | 113 ++++-- src/resources/beta/threads/runs/steps.ts | 48 ++- src/resources/beta/threads/threads.ts | 198 ++++++--- .../beta/vector-stores/file-batches.ts | 11 +- src/resources/beta/vector-stores/files.ts | 17 +- src/resources/beta/vector-stores/index.ts | 48 +-- .../beta/vector-stores/vector-stores.ts | 76 ++-- src/resources/chat/chat.ts | 114 ++++-- src/resources/chat/completions.ts | 71 ++-- src/resources/chat/index.ts | 66 +-- src/resources/completions.ts | 16 +- src/resources/embeddings.ts | 13 +- src/resources/files.ts | 21 +- src/resources/fine-tuning/fine-tuning.ts | 43 +- src/resources/fine-tuning/index.ts | 16 +- src/resources/fine-tuning/jobs/checkpoints.ts | 13 +- src/resources/fine-tuning/jobs/index.ts | 28 +- src/resources/fine-tuning/jobs/jobs.ts | 47 ++- src/resources/images.ts | 17 +- src/resources/index.ts | 74 ++-- src/resources/models.ts | 9 +- src/resources/moderations.ts | 19 +- src/resources/uploads/index.ts | 4 +- src/resources/uploads/parts.ts | 6 +- src/resources/uploads/uploads.ts | 19 +- 
tsconfig.json | 2 +- 40 files changed, 1307 insertions(+), 884 deletions(-) diff --git a/src/index.ts b/src/index.ts index 56108223a..c1506997b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,12 +1,108 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Errors from './error'; -import * as Uploads from './uploads'; import { type Agent, type RequestInit } from './_shims/index'; import * as qs from './internal/qs'; import * as Core from './core'; +import * as Errors from './error'; import * as Pagination from './pagination'; +import { type CursorPageParams, CursorPageResponse, PageResponse } from './pagination'; +import * as Uploads from './uploads'; import * as API from './resources/index'; +import { + Batch, + BatchCreateParams, + BatchError, + BatchListParams, + BatchRequestCounts, + Batches, + BatchesPage, +} from './resources/batches'; +import { + Completion, + CompletionChoice, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionUsage, + Completions, +} from './resources/completions'; +import { + CreateEmbeddingResponse, + Embedding, + EmbeddingCreateParams, + EmbeddingModel, + Embeddings, +} from './resources/embeddings'; +import { + FileContent, + FileCreateParams, + FileDeleted, + FileListParams, + FileObject, + FileObjectsPage, + FilePurpose, + Files, +} from './resources/files'; +import { + Image, + ImageCreateVariationParams, + ImageEditParams, + ImageGenerateParams, + ImageModel, + Images, + ImagesResponse, +} from './resources/images'; +import { Model, ModelDeleted, Models, ModelsPage } from './resources/models'; +import { + Moderation, + ModerationCreateParams, + ModerationCreateResponse, + ModerationImageURLInput, + ModerationModel, + ModerationMultiModalInput, + ModerationTextInput, + Moderations, +} from './resources/moderations'; +import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; +import { Beta } from 
'./resources/beta/beta'; +import { Chat, ChatModel } from './resources/chat/chat'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionRole, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, +} from './resources/chat/completions'; +import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { + Upload, + UploadCompleteParams, + UploadCreateParams, + Uploads as UploadsAPIUploads, +} from './resources/uploads/uploads'; export interface ClientOptions { /** @@ -209,138 +305,167 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export const { - OpenAIError, - APIError, - APIConnectionError, - APIConnectionTimeoutError, - APIUserAbortError, - NotFoundError, - ConflictError, - RateLimitError, - BadRequestError, - AuthenticationError, - InternalServerError, - PermissionDeniedError, - UnprocessableEntityError, -} = Errors; +export const OpenAIError = Errors.OpenAIError; +export const APIError = Errors.APIError; +export const APIConnectionError = Errors.APIConnectionError; +export const APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; +export const APIUserAbortError = Errors.APIUserAbortError; +export const NotFoundError = 
Errors.NotFoundError; +export const ConflictError = Errors.ConflictError; +export const RateLimitError = Errors.RateLimitError; +export const BadRequestError = Errors.BadRequestError; +export const AuthenticationError = Errors.AuthenticationError; +export const InternalServerError = Errors.InternalServerError; +export const PermissionDeniedError = Errors.PermissionDeniedError; +export const UnprocessableEntityError = Errors.UnprocessableEntityError; export import toFile = Uploads.toFile; export import fileFromPath = Uploads.fileFromPath; -export namespace OpenAI { - export import RequestOptions = Core.RequestOptions; +OpenAI.Completions = Completions; +OpenAI.Chat = Chat; +OpenAI.Embeddings = Embeddings; +OpenAI.Files = Files; +OpenAI.FileObjectsPage = FileObjectsPage; +OpenAI.Images = Images; +OpenAI.Audio = Audio; +OpenAI.Moderations = Moderations; +OpenAI.Models = Models; +OpenAI.ModelsPage = ModelsPage; +OpenAI.FineTuning = FineTuning; +OpenAI.Beta = Beta; +OpenAI.Batches = Batches; +OpenAI.BatchesPage = BatchesPage; +OpenAI.Uploads = UploadsAPIUploads; + +export declare namespace OpenAI { + export type RequestOptions = Core.RequestOptions; export import Page = Pagination.Page; - export import PageResponse = Pagination.PageResponse; + export { type PageResponse as PageResponse }; export import CursorPage = Pagination.CursorPage; - export import CursorPageParams = Pagination.CursorPageParams; - export import CursorPageResponse = Pagination.CursorPageResponse; - - export import Completions = API.Completions; - export import Completion = API.Completion; - export import CompletionChoice = API.CompletionChoice; - export import CompletionUsage = API.CompletionUsage; - export import CompletionCreateParams = API.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; - - export import Chat = API.Chat; - export import 
ChatModel = API.ChatModel; - export import ChatCompletion = API.ChatCompletion; - export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = API.ChatCompletionAudio; - export import ChatCompletionAudioParam = API.ChatCompletionAudioParam; - export import ChatCompletionChunk = API.ChatCompletionChunk; - export import ChatCompletionContentPart = API.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = API.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = API.ChatCompletionMessage; - export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; - export import ChatCompletionModality = API.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = API.ChatCompletionRole; - export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; - export import ChatCompletionTool = API.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; - export import 
ChatCompletionCreateParams = API.ChatCompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; - - export import Embeddings = API.Embeddings; - export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; - export import Embedding = API.Embedding; - export import EmbeddingModel = API.EmbeddingModel; - export import EmbeddingCreateParams = API.EmbeddingCreateParams; - - export import Files = API.Files; - export import FileContent = API.FileContent; - export import FileDeleted = API.FileDeleted; - export import FileObject = API.FileObject; - export import FilePurpose = API.FilePurpose; - export import FileObjectsPage = API.FileObjectsPage; - export import FileCreateParams = API.FileCreateParams; - export import FileListParams = API.FileListParams; - - export import Images = API.Images; - export import Image = API.Image; - export import ImageModel = API.ImageModel; - export import ImagesResponse = API.ImagesResponse; - export import ImageCreateVariationParams = API.ImageCreateVariationParams; - export import ImageEditParams = API.ImageEditParams; - export import ImageGenerateParams = API.ImageGenerateParams; - - export import Audio = API.Audio; - export import AudioModel = API.AudioModel; - export import AudioResponseFormat = API.AudioResponseFormat; - - export import Moderations = API.Moderations; - export import Moderation = API.Moderation; - export import ModerationImageURLInput = API.ModerationImageURLInput; - export import ModerationModel = API.ModerationModel; - export import ModerationMultiModalInput = API.ModerationMultiModalInput; - export import ModerationTextInput = API.ModerationTextInput; - export import ModerationCreateResponse = API.ModerationCreateResponse; - export import ModerationCreateParams = API.ModerationCreateParams; - - export import Models = API.Models; - export import Model 
= API.Model; - export import ModelDeleted = API.ModelDeleted; - export import ModelsPage = API.ModelsPage; - - export import FineTuning = API.FineTuning; - - export import Beta = API.Beta; - - export import Batches = API.Batches; - export import Batch = API.Batch; - export import BatchError = API.BatchError; - export import BatchRequestCounts = API.BatchRequestCounts; - export import BatchesPage = API.BatchesPage; - export import BatchCreateParams = API.BatchCreateParams; - export import BatchListParams = API.BatchListParams; - - export import Uploads = API.Uploads; - export import Upload = API.Upload; - export import UploadCreateParams = API.UploadCreateParams; - export import UploadCompleteParams = API.UploadCompleteParams; - - export import ErrorObject = API.ErrorObject; - export import FunctionDefinition = API.FunctionDefinition; - export import FunctionParameters = API.FunctionParameters; - export import ResponseFormatJSONObject = API.ResponseFormatJSONObject; - export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; - export import ResponseFormatText = API.ResponseFormatText; + export { type CursorPageParams as CursorPageParams, type CursorPageResponse as CursorPageResponse }; + + export { + Completions as Completions, + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; + + export { + Chat as Chat, + type ChatModel as ChatModel, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as 
ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + }; + + export { + Embeddings as Embeddings, + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; + + export { + Files as Files, + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as 
FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export { + Images as Images, + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; + + export { Audio as Audio, type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Moderations as Moderations, + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; + + export { + Models as Models, + type Model as Model, + type ModelDeleted as ModelDeleted, + ModelsPage as ModelsPage, + }; + + export { FineTuning as FineTuning }; + + export { Beta as Beta }; + + export { + Batches as Batches, + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; + + export { + UploadsAPIUploads as Uploads, + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export type ErrorObject = API.ErrorObject; + export type FunctionDefinition = API.FunctionDefinition; + export type FunctionParameters = API.FunctionParameters; + export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; + export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; + export type ResponseFormatText = API.ResponseFormatText; } // ---------------------- Azure ---------------------- diff --git 
a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index 7c5ffb58e..c826c910e 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -6,7 +6,7 @@ import { Text, ImageFile, TextDelta, - Messages, + MessageDelta, MessageContent, } from 'openai/resources/beta/threads/messages'; import * as Core from 'openai/core'; @@ -31,7 +31,6 @@ import { import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; import { BaseEvents, EventStream } from './EventStream'; -import MessageDelta = Messages.MessageDelta; export interface AssistantStreamEvents extends BaseEvents { run: (run: Run) => void; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index e06e28094..b9a7ad4f8 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,10 +1,26 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; -import * as AudioAPI from './audio'; import * as SpeechAPI from './speech'; +import { Speech, SpeechCreateParams, SpeechModel } from './speech'; import * as TranscriptionsAPI from './transcriptions'; +import { + Transcription, + TranscriptionCreateParams, + TranscriptionCreateResponse, + TranscriptionSegment, + TranscriptionVerbose, + TranscriptionWord, + Transcriptions, +} from './transcriptions'; import * as TranslationsAPI from './translations'; +import { + Translation, + TranslationCreateParams, + TranslationCreateResponse, + TranslationVerbose, + Translations, +} from './translations'; export class Audio extends APIResource { transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client); @@ -20,30 +36,30 @@ export type AudioModel = 'whisper-1'; */ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; -export namespace Audio { - export import AudioModel = AudioAPI.AudioModel; - export import AudioResponseFormat = AudioAPI.AudioResponseFormat; - export import Transcriptions = TranscriptionsAPI.Transcriptions; - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams; - export import Translations = TranslationsAPI.Translations; - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import TranslationCreateResponse = 
TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams; - export import Speech = SpeechAPI.Speech; - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +Audio.Transcriptions = Transcriptions; +Audio.Translations = Translations; +Audio.Speech = Speech; + +export declare namespace Audio { + export { type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Transcriptions as Transcriptions, + type Transcription as Transcription, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + }; + + export { + Translations as Translations, + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; + + export { Speech as Speech, type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 952c05b03..2bbe9e3ab 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,20 +1,20 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { AudioModel, AudioResponseFormat, Audio } from './audio'; -export { SpeechModel, SpeechCreateParams, Speech } from './speech'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio'; +export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { - Transcription, - TranscriptionSegment, - TranscriptionVerbose, - TranscriptionWord, - TranscriptionCreateResponse, - TranscriptionCreateParams, Transcriptions, + type Transcription, + type TranscriptionSegment, + type TranscriptionVerbose, + type TranscriptionWord, + type TranscriptionCreateResponse, + type TranscriptionCreateParams, } from './transcriptions'; export { - Translation, - TranslationVerbose, - TranslationCreateResponse, - TranslationCreateParams, Translations, + type Translation, + type TranslationVerbose, + type TranslationCreateResponse, + type TranslationCreateParams, } from './translations'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 34fb26b02..da99bf649 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as SpeechAPI from './speech'; import { type Response } from '../../_shims/index'; export class Speech extends APIResource { @@ -49,7 +48,6 @@ export interface SpeechCreateParams { speed?: number; } -export namespace Speech { - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +export declare namespace Speech { + export { type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 902dc9e5f..dd4258787 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; 
-import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; export class Transcriptions extends APIResource { @@ -205,15 +204,13 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } -export namespace Transcriptions { - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams; +export declare namespace Transcriptions { + export { + type Transcription as Transcription, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 36c2dc7c2..b98a95044 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as TranslationsAPI from './translations'; import * as AudioAPI from './audio'; import * as TranscriptionsAPI from './transcriptions'; @@ -98,13 +97,11 @@ export interface TranslationCreateParams< temperature?: number; } -export namespace Translations { - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import 
TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams; +export declare namespace Translations { + export { + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; } diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 738582f9e..e68e7569c 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -244,11 +244,15 @@ export interface BatchCreateParams { export interface BatchListParams extends CursorPageParams {} -export namespace Batches { - export import Batch = BatchesAPI.Batch; - export import BatchError = BatchesAPI.BatchError; - export import BatchRequestCounts = BatchesAPI.BatchRequestCounts; - export import BatchesPage = BatchesAPI.BatchesPage; - export import BatchCreateParams = BatchesAPI.BatchCreateParams; - export import BatchListParams = BatchesAPI.BatchListParams; +Batches.BatchesPage = BatchesPage; + +export declare namespace Batches { + export { + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; } diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index aa7362297..6d48089ce 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; -import * as AssistantsAPI from './assistants'; import * as Shared from '../shared'; import * as ChatAPI from '../chat/chat'; import * as MessagesAPI 
from './threads/messages'; @@ -1396,20 +1395,24 @@ export interface AssistantListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Assistants { - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; +Assistants.AssistantsPage = AssistantsPage; + +export declare namespace Assistants { + export { + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; } diff --git 
a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 0bcf217a8..b904abe4a 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -3,8 +3,59 @@ import { APIResource } from '../../resource'; import * as AssistantsAPI from './assistants'; import * as ChatAPI from './chat/chat'; +import { + Assistant, + AssistantCreateParams, + AssistantDeleted, + AssistantListParams, + AssistantStreamEvent, + AssistantTool, + AssistantUpdateParams, + Assistants, + AssistantsPage, + CodeInterpreterTool, + FileSearchTool, + FunctionTool, + MessageStreamEvent, + RunStepStreamEvent, + RunStreamEvent, + ThreadStreamEvent, +} from './assistants'; import * as ThreadsAPI from './threads/threads'; +import { + AssistantResponseFormatOption, + AssistantToolChoice, + AssistantToolChoiceFunction, + AssistantToolChoiceOption, + Thread, + ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunPollParams, + ThreadCreateAndRunStreamParams, + ThreadCreateParams, + ThreadDeleted, + ThreadUpdateParams, + Threads, +} from './threads/threads'; import * as VectorStoresAPI from './vector-stores/vector-stores'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './vector-stores/vector-stores'; +import { Chat } from './chat/chat'; export class Beta extends APIResource { vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); @@ -13,50 +64,65 @@ export class Beta extends APIResource { threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } -export namespace Beta { - export import VectorStores = VectorStoresAPI.VectorStores; - 
export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Chat = ChatAPI.Chat; - export import Assistants = AssistantsAPI.Assistants; - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import 
AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; - export import Threads = ThreadsAPI.Threads; - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; +Beta.VectorStores = VectorStores; +Beta.VectorStoresPage = VectorStoresPage; +Beta.Assistants = Assistants; +Beta.AssistantsPage = AssistantsPage; +Beta.Threads = Threads; + +export declare namespace Beta { + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam as 
StaticFileChunkingStrategyParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + VectorStoresPage as VectorStoresPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + }; + + export { Chat }; + + export { + Assistants as Assistants, + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; + + export { + Threads as Threads, + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 9fcf805a1..d7111288f 100644 --- a/src/resources/beta/index.ts +++ 
b/src/resources/beta/index.ts @@ -1,54 +1,54 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Assistant, - AssistantDeleted, - AssistantStreamEvent, - AssistantTool, - CodeInterpreterTool, - FileSearchTool, - FunctionTool, - MessageStreamEvent, - RunStepStreamEvent, - RunStreamEvent, - ThreadStreamEvent, - AssistantCreateParams, - AssistantUpdateParams, - AssistantListParams, AssistantsPage, Assistants, + type Assistant, + type AssistantDeleted, + type AssistantStreamEvent, + type AssistantTool, + type CodeInterpreterTool, + type FileSearchTool, + type FunctionTool, + type MessageStreamEvent, + type RunStepStreamEvent, + type RunStreamEvent, + type ThreadStreamEvent, + type AssistantCreateParams, + type AssistantUpdateParams, + type AssistantListParams, } from './assistants'; +export { Beta } from './beta'; +export { Chat } from './chat/index'; export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { Beta } from './beta'; -export { Chat } from './chat/index'; export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, 
- StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, VectorStoresPage, VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, } from './vector-stores/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 1964cffb8..f67a1edde 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -1,73 +1,73 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Annotation, - AnnotationDelta, - FileCitationAnnotation, - FileCitationDeltaAnnotation, - FilePathAnnotation, - FilePathDeltaAnnotation, - ImageFile, - ImageFileContentBlock, - ImageFileDelta, - ImageFileDeltaBlock, - ImageURL, - ImageURLContentBlock, - ImageURLDelta, - ImageURLDeltaBlock, - Message, - MessageContent, - MessageContentDelta, - MessageContentPartParam, - MessageDeleted, - MessageDelta, - MessageDeltaEvent, - RefusalContentBlock, - RefusalDeltaBlock, - Text, - TextContentBlock, - TextContentBlockParam, - TextDelta, - TextDeltaBlock, - MessageCreateParams, - MessageUpdateParams, - MessageListParams, MessagesPage, Messages, + type Annotation, + type AnnotationDelta, + type FileCitationAnnotation, + type FileCitationDeltaAnnotation, + type FilePathAnnotation, + type FilePathDeltaAnnotation, + type ImageFile, + type ImageFileContentBlock, + type ImageFileDelta, + type ImageFileDeltaBlock, + type ImageURL, + type ImageURLContentBlock, + type ImageURLDelta, + type ImageURLDeltaBlock, + type Message, + type MessageContent, + type 
MessageContentDelta, + type MessageContentPartParam, + type MessageDeleted, + type MessageDelta, + type MessageDeltaEvent, + type RefusalContentBlock, + type RefusalDeltaBlock, + type Text, + type TextContentBlock, + type TextContentBlockParam, + type TextDelta, + type TextDeltaBlock, + type MessageCreateParams, + type MessageUpdateParams, + type MessageListParams, } from './messages'; export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, - Threads, -} from './threads'; -export { - RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs/index'; +export { + Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type 
ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, +} from './threads'; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 59c92675b..af7977667 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as MessagesAPI from './messages'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -722,37 +721,41 @@ export interface MessageListParams extends CursorPageParams { run_id?: string; } -export namespace Messages { - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = 
MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Messages.MessagesPage = MessagesPage; + +export declare namespace Messages { + export { + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type Message as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + 
type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; } diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index 9496f59e1..9dbe575bc 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -1,46 +1,46 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - CodeInterpreterLogs, - CodeInterpreterOutputImage, - CodeInterpreterToolCall, - CodeInterpreterToolCallDelta, - FileSearchToolCall, - FileSearchToolCallDelta, - FunctionToolCall, - FunctionToolCallDelta, - MessageCreationStepDetails, - RunStep, - RunStepDelta, - RunStepDeltaEvent, - RunStepDeltaMessageDelta, - RunStepInclude, - ToolCall, - ToolCallDelta, - ToolCallDeltaObject, - ToolCallsStepDetails, - StepRetrieveParams, - StepListParams, RunStepsPage, Steps, + type CodeInterpreterLogs, + type CodeInterpreterOutputImage, + type CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta, + type FileSearchToolCall, + type FileSearchToolCallDelta, + type FunctionToolCall, + type FunctionToolCallDelta, + type MessageCreationStepDetails, + type RunStep, + type RunStepDelta, + type RunStepDeltaEvent, + type RunStepDeltaMessageDelta, + type RunStepInclude, + type ToolCall, + type ToolCallDelta, + type ToolCallDeltaObject, + type ToolCallsStepDetails, + type StepRetrieveParams, + type StepListParams, } from './steps'; export { - RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - 
RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index b48edd5b1..83a447a91 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -13,6 +13,30 @@ import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; +import { + CodeInterpreterLogs, + CodeInterpreterOutputImage, + CodeInterpreterToolCall, + CodeInterpreterToolCallDelta, + FileSearchToolCall, + FileSearchToolCallDelta, + FunctionToolCall, + FunctionToolCallDelta, + MessageCreationStepDetails, + RunStep, + RunStepDelta, + RunStepDeltaEvent, + RunStepDeltaMessageDelta, + RunStepInclude, + RunStepsPage, + StepListParams, + StepRetrieveParams, + Steps, + ToolCall, + ToolCallDelta, + ToolCallDeltaObject, + ToolCallsStepDetails, +} from './steps'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; import { Stream } from '../../../../streaming'; @@ -1619,44 +1643,53 @@ export namespace RunSubmitToolOutputsStreamParams { } 
} -export namespace Runs { - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Steps = StepsAPI.Steps; - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = 
StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Runs.RunsPage = RunsPage; +Runs.Steps = Steps; +Runs.RunStepsPage = RunStepsPage; + +export declare namespace Runs { + export { + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Steps as Steps, + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as 
FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c076191a3..b10bcb868 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -738,26 +738,30 @@ export interface StepListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Steps { - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import 
RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Steps.RunStepsPage = RunStepsPage; + +export declare namespace Steps { + export { + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index be959eb30..899645508 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -10,8 +10,63 @@ import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; +import { + 
Annotation, + AnnotationDelta, + FileCitationAnnotation, + FileCitationDeltaAnnotation, + FilePathAnnotation, + FilePathDeltaAnnotation, + ImageFile, + ImageFileContentBlock, + ImageFileDelta, + ImageFileDeltaBlock, + ImageURL, + ImageURLContentBlock, + ImageURLDelta, + ImageURLDeltaBlock, + Message as MessagesAPIMessage, + MessageContent, + MessageContentDelta, + MessageContentPartParam, + MessageCreateParams, + MessageDeleted, + MessageDelta, + MessageDeltaEvent, + MessageListParams, + MessageUpdateParams, + Messages, + MessagesPage, + RefusalContentBlock, + RefusalDeltaBlock, + Text, + TextContentBlock, + TextContentBlockParam, + TextDelta, + TextDeltaBlock, +} from './messages'; import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; +import { + RequiredActionFunctionToolCall, + Run, + RunCreateAndPollParams, + RunCreateAndStreamParams, + RunCreateParams, + RunCreateParamsNonStreaming, + RunCreateParamsStreaming, + RunListParams, + RunStatus, + RunStreamParams, + RunSubmitToolOutputsAndPollParams, + RunSubmitToolOutputsParams, + RunSubmitToolOutputsParamsNonStreaming, + RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsStreamParams, + RunUpdateParams, + Runs, + RunsPage, +} from './runs/runs'; import { Stream } from '../../../streaming'; export class Threads extends APIResource { @@ -1489,69 +1544,82 @@ export namespace ThreadCreateAndRunStreamParams { } } -export namespace Threads { - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import 
ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; - export import Runs = RunsAPI.Runs; - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Messages = MessagesAPI.Messages; - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = 
MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Threads.Runs = Runs; +Threads.RunsPage = RunsPage; +Threads.Messages = 
Messages; +Threads.MessagesPage = MessagesPage; + +export declare namespace Threads { + export { + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; + + export { + Runs as Runs, + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Messages as Messages, + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as 
FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type MessagesAPIMessage as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 3436d7575..533e6ce03 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -6,7 +6,6 @@ import { sleep } from '../../../core'; import { Uploadable } from '../../../core'; import { allSettledWithThrow } from '../../../lib/Util'; import * as Core from '../../../core'; -import * as FileBatchesAPI from './file-batches'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; @@ -294,10 +293,12 @@ export interface FileBatchListFilesParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace FileBatches { - export import VectorStoreFileBatch = 
FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +export declare namespace FileBatches { + export { + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } export { VectorStoreFilesPage }; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index f82cd63df..a263a0491 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { sleep, Uploadable, isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as FilesAPI from './files'; import * as VectorStoresAPI from './vector-stores'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -286,10 +285,14 @@ export interface FileListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Files { - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.VectorStoreFilesPage = VectorStoreFilesPage; + +export declare namespace Files { + export { + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + VectorStoreFilesPage as VectorStoreFilesPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts index f70215f8f..89fc0cde0 100644 --- 
a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/beta/vector-stores/index.ts @@ -1,32 +1,32 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, - VectorStoresPage, - VectorStores, -} from './vector-stores'; + FileBatches, + type VectorStoreFileBatch, + type FileBatchCreateParams, + type FileBatchListFilesParams, +} from './file-batches'; export { - VectorStoreFile, - VectorStoreFileDeleted, - FileCreateParams, - FileListParams, VectorStoreFilesPage, Files, + type VectorStoreFile, + type VectorStoreFileDeleted, + type FileCreateParams, + type FileListParams, } from './files'; export { - VectorStoreFileBatch, - FileBatchCreateParams, - FileBatchListFilesParams, - FileBatches, -} from './file-batches'; + VectorStoresPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, +} from './vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 3c9aa707d..4d1e83dce 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -3,9 +3,22 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as 
VectorStoresAPI from './vector-stores'; import * as FileBatchesAPI from './file-batches'; +import { + FileBatchCreateParams, + FileBatchListFilesParams, + FileBatches, + VectorStoreFileBatch, +} from './file-batches'; import * as FilesAPI from './files'; +import { + FileCreateParams, + FileListParams, + Files, + VectorStoreFile, + VectorStoreFileDeleted, + VectorStoreFilesPage, +} from './files'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class VectorStores extends APIResource { @@ -371,28 +384,41 @@ export interface VectorStoreListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace VectorStores { - export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Files = FilesAPI.Files; - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = 
FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; - export import FileBatches = FileBatchesAPI.FileBatches; - export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +VectorStores.VectorStoresPage = VectorStoresPage; +VectorStores.Files = Files; +VectorStores.VectorStoreFilesPage = VectorStoreFilesPage; +VectorStores.FileBatches = FileBatches; + +export declare namespace VectorStores { + export { + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + VectorStoresPage as VectorStoresPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + }; + + export { + Files as Files, + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + VectorStoreFilesPage as VectorStoreFilesPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export { + FileBatches as FileBatches, + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } diff --git a/src/resources/chat/chat.ts 
b/src/resources/chat/chat.ts index 43ef5662c..afe4dd08e 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,8 +1,42 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as ChatAPI from './chat'; import * as CompletionsAPI from './completions'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionRole, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + Completions, + CreateChatCompletionRequestMessage, +} from './completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); @@ -43,42 +77,44 @@ export type ChatModel = | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613'; -export namespace Chat { - export import ChatModel = ChatAPI.ChatModel; - export import Completions = CompletionsAPI.Completions; - export import ChatCompletion = CompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = 
CompletionsAPI.ChatCompletionAudio; - export import ChatCompletionAudioParam = CompletionsAPI.ChatCompletionAudioParam; - export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = CompletionsAPI.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage; - export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionModality = CompletionsAPI.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = 
CompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam should be used instead - */ - export import CreateChatCompletionRequestMessage = CompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = CompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = CompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = CompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +Chat.Completions = Completions; + +export declare namespace Chat { + export { type ChatModel as ChatModel }; + + export { + Completions as Completions, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type 
ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index d439e9a25..430e52bb2 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1185,40 +1185,39 @@ export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreat */ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; -export namespace Completions { - export import ChatCompletion = ChatCompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = ChatCompletionsAPI.ChatCompletionAudio; - export import ChatCompletionAudioParam = ChatCompletionsAPI.ChatCompletionAudioParam; - export import ChatCompletionChunk = 
ChatCompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = ChatCompletionsAPI.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage; - export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionModality = ChatCompletionsAPI.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam 
should be used instead - */ - export import CreateChatCompletionRequestMessage = ChatCompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = ChatCompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = ChatCompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = ChatCompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = ChatCompletionsAPI.CompletionCreateParamsStreaming; +export declare namespace Completions { + export { + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type 
ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 22803e819..d9366bf74 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,38 +1,38 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Chat, type ChatModel } from './chat'; export { - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionAudio, - ChatCompletionAudioParam, - ChatCompletionChunk, - ChatCompletionContentPart, - ChatCompletionContentPartImage, - ChatCompletionContentPartInputAudio, - ChatCompletionContentPartRefusal, - ChatCompletionContentPartText, - ChatCompletionFunctionCallOption, - ChatCompletionFunctionMessageParam, - ChatCompletionMessage, - ChatCompletionMessageParam, - ChatCompletionMessageToolCall, - ChatCompletionModality, - ChatCompletionNamedToolChoice, - ChatCompletionRole, - ChatCompletionStreamOptions, - ChatCompletionSystemMessageParam, - ChatCompletionTokenLogprob, - ChatCompletionTool, - ChatCompletionToolChoiceOption, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, - CreateChatCompletionRequestMessage, - ChatCompletionCreateParams, - CompletionCreateParams, - ChatCompletionCreateParamsNonStreaming, - CompletionCreateParamsNonStreaming, - ChatCompletionCreateParamsStreaming, - CompletionCreateParamsStreaming, Completions, + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionRole, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam, + type 
CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, } from './completions'; -export { ChatModel, Chat } from './chat'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 7acd5d13f..94c4581a1 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -361,11 +361,13 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB stream: true; } -export namespace Completions { - export import Completion = CompletionsAPI.Completion; - export import CompletionChoice = CompletionsAPI.CompletionChoice; - export import CompletionUsage = CompletionsAPI.CompletionUsage; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +export declare namespace Completions { + export { + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 6d8e670a7..e2b35f530 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as EmbeddingsAPI from './embeddings'; export class Embeddings extends APIResource { /** @@ -120,9 +119,11 @@ export interface EmbeddingCreateParams { user?: string; } -export namespace 
Embeddings { - export import CreateEmbeddingResponse = EmbeddingsAPI.CreateEmbeddingResponse; - export import Embedding = EmbeddingsAPI.Embedding; - export import EmbeddingModel = EmbeddingsAPI.EmbeddingModel; - export import EmbeddingCreateParams = EmbeddingsAPI.EmbeddingCreateParams; +export declare namespace Embeddings { + export { + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; } diff --git a/src/resources/files.ts b/src/resources/files.ts index ba01a9041..dec815a28 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -5,7 +5,6 @@ import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as Core from '../core'; -import * as FilesAPI from './files'; import { Page } from '../pagination'; import { type Response } from '../_shims/index'; @@ -221,12 +220,16 @@ export interface FileListParams { purpose?: string; } -export namespace Files { - export import FileContent = FilesAPI.FileContent; - export import FileDeleted = FilesAPI.FileDeleted; - export import FileObject = FilesAPI.FileObject; - export import FilePurpose = FilesAPI.FilePurpose; - export import FileObjectsPage = FilesAPI.FileObjectsPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.FileObjectsPage = FileObjectsPage; + +export declare namespace Files { + export { + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index b1ba34ecf..df013c8ec 100644 --- 
a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -2,21 +2,40 @@ import { APIResource } from '../../resource'; import * as JobsAPI from './jobs/jobs'; +import { + FineTuningJob, + FineTuningJobEvent, + FineTuningJobEventsPage, + FineTuningJobIntegration, + FineTuningJobWandbIntegration, + FineTuningJobWandbIntegrationObject, + FineTuningJobsPage, + JobCreateParams, + JobListEventsParams, + JobListParams, + Jobs, +} from './jobs/jobs'; export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); } -export namespace FineTuning { - export import Jobs = JobsAPI.Jobs; - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; +FineTuning.Jobs = Jobs; +FineTuning.FineTuningJobsPage = FineTuningJobsPage; +FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; + +export declare namespace FineTuning { + export { + Jobs as Jobs, + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as 
JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 1d8739a0a..4954406b8 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -2,15 +2,15 @@ export { FineTuning } from './fine-tuning'; export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs/index'; diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 02896b26d..b3018ac5f 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as CheckpointsAPI from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { @@ -101,8 +100,12 @@ export namespace FineTuningJobCheckpoint { export interface CheckpointListParams extends CursorPageParams {} -export namespace Checkpoints { - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Checkpoints.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Checkpoints { + 
export { + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts index 275c776e9..7a05b48b2 100644 --- a/src/resources/fine-tuning/jobs/index.ts +++ b/src/resources/fine-tuning/jobs/index.ts @@ -1,21 +1,21 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, + FineTuningJobCheckpointsPage, + Checkpoints, + type FineTuningJobCheckpoint, + type CheckpointListParams, +} from './checkpoints'; +export { FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs'; -export { - FineTuningJobCheckpoint, - CheckpointListParams, - FineTuningJobCheckpointsPage, - Checkpoints, -} from './checkpoints'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 54b5c4e6a..275fad869 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,8 +3,13 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as JobsAPI from './jobs'; import * as CheckpointsAPI from './checkpoints'; +import { + CheckpointListParams, + Checkpoints, + FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage, +} from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Jobs extends 
APIResource { @@ -445,19 +450,29 @@ export interface JobListParams extends CursorPageParams {} export interface JobListEventsParams extends CursorPageParams {} -export namespace Jobs { - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; - export import Checkpoints = CheckpointsAPI.Checkpoints; - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Jobs.FineTuningJobsPage = FineTuningJobsPage; +Jobs.FineTuningJobEventsPage = FineTuningJobEventsPage; +Jobs.Checkpoints = Checkpoints; +Jobs.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Jobs { + export { + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; + + export { 
+ Checkpoints as Checkpoints, + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/images.ts b/src/resources/images.ts index fdd0b8881..f4d59b941 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ImagesAPI from './images'; export class Images extends APIResource { /** @@ -207,11 +206,13 @@ export interface ImageGenerateParams { user?: string; } -export namespace Images { - export import Image = ImagesAPI.Image; - export import ImageModel = ImagesAPI.ImageModel; - export import ImagesResponse = ImagesAPI.ImagesResponse; - export import ImageCreateVariationParams = ImagesAPI.ImageCreateVariationParams; - export import ImageEditParams = ImagesAPI.ImageEditParams; - export import ImageGenerateParams = ImagesAPI.ImageGenerateParams; +export declare namespace Images { + export { + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; } diff --git a/src/resources/index.ts b/src/resources/index.ts index 15c5db77f..ad0302357 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,62 +2,62 @@ export * from './chat/index'; export * from './shared'; -export { AudioModel, AudioResponseFormat, Audio } from './audio/audio'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio/audio'; export { - Batch, - BatchError, - BatchRequestCounts, - BatchCreateParams, - BatchListParams, BatchesPage, Batches, + type Batch, + type BatchError, + type BatchRequestCounts, + type BatchCreateParams, + type BatchListParams, } from './batches'; export { Beta } from './beta/beta'; export { 
- Completion, - CompletionChoice, - CompletionUsage, - CompletionCreateParams, - CompletionCreateParamsNonStreaming, - CompletionCreateParamsStreaming, Completions, + type Completion, + type CompletionChoice, + type CompletionUsage, + type CompletionCreateParams, + type CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming, } from './completions'; export { - CreateEmbeddingResponse, - Embedding, - EmbeddingModel, - EmbeddingCreateParams, Embeddings, + type CreateEmbeddingResponse, + type Embedding, + type EmbeddingModel, + type EmbeddingCreateParams, } from './embeddings'; export { - FileContent, - FileDeleted, - FileObject, - FilePurpose, - FileCreateParams, - FileListParams, FileObjectsPage, Files, + type FileContent, + type FileDeleted, + type FileObject, + type FilePurpose, + type FileCreateParams, + type FileListParams, } from './files'; export { FineTuning } from './fine-tuning/fine-tuning'; export { - Image, - ImageModel, - ImagesResponse, - ImageCreateVariationParams, - ImageEditParams, - ImageGenerateParams, Images, + type Image, + type ImageModel, + type ImagesResponse, + type ImageCreateVariationParams, + type ImageEditParams, + type ImageGenerateParams, } from './images'; -export { Model, ModelDeleted, ModelsPage, Models } from './models'; +export { ModelsPage, Models, type Model, type ModelDeleted } from './models'; export { - Moderation, - ModerationImageURLInput, - ModerationModel, - ModerationMultiModalInput, - ModerationTextInput, - ModerationCreateResponse, - ModerationCreateParams, Moderations, + type Moderation, + type ModerationImageURLInput, + type ModerationModel, + type ModerationMultiModalInput, + type ModerationTextInput, + type ModerationCreateResponse, + type ModerationCreateParams, } from './moderations'; -export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads/uploads'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; diff 
--git a/src/resources/models.ts b/src/resources/models.ts index 178915747..6d8cd5296 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModelsAPI from './models'; import { Page } from '../pagination'; export class Models extends APIResource { @@ -69,8 +68,8 @@ export interface ModelDeleted { object: string; } -export namespace Models { - export import Model = ModelsAPI.Model; - export import ModelDeleted = ModelsAPI.ModelDeleted; - export import ModelsPage = ModelsAPI.ModelsPage; +Models.ModelsPage = ModelsPage; + +export declare namespace Models { + export { type Model as Model, type ModelDeleted as ModelDeleted, ModelsPage as ModelsPage }; } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index ba800509e..cdde12a62 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { /** @@ -357,12 +356,14 @@ export interface ModerationCreateParams { model?: (string & {}) | ModerationModel; } -export namespace Moderations { - export import Moderation = ModerationsAPI.Moderation; - export import ModerationImageURLInput = ModerationsAPI.ModerationImageURLInput; - export import ModerationModel = ModerationsAPI.ModerationModel; - export import ModerationMultiModalInput = ModerationsAPI.ModerationMultiModalInput; - export import ModerationTextInput = ModerationsAPI.ModerationTextInput; - export import ModerationCreateResponse = ModerationsAPI.ModerationCreateResponse; - export import ModerationCreateParams = ModerationsAPI.ModerationCreateParams; +export declare namespace Moderations { + export { + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + 
type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; } diff --git a/src/resources/uploads/index.ts b/src/resources/uploads/index.ts index 1a353d312..200d3567e 100644 --- a/src/resources/uploads/index.ts +++ b/src/resources/uploads/index.ts @@ -1,4 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads'; -export { UploadPart, PartCreateParams, Parts } from './parts'; +export { Parts, type UploadPart, type PartCreateParams } from './parts'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads'; diff --git a/src/resources/uploads/parts.ts b/src/resources/uploads/parts.ts index a4af5c606..9b54c99e6 100644 --- a/src/resources/uploads/parts.ts +++ b/src/resources/uploads/parts.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as PartsAPI from './parts'; export class Parts extends APIResource { /** @@ -62,7 +61,6 @@ export interface PartCreateParams { data: Core.Uploadable; } -export namespace Parts { - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +export declare namespace Parts { + export { type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 1c3ed708d..78fa3a7b5 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -2,9 +2,9 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as UploadsAPI from './uploads'; import * as FilesAPI from '../files'; import * as PartsAPI from './parts'; +import { PartCreateParams, 
Parts, UploadPart } from './parts'; export class Uploads extends APIResource { parts: PartsAPI.Parts = new PartsAPI.Parts(this._client); @@ -159,11 +159,14 @@ export interface UploadCompleteParams { md5?: string; } -export namespace Uploads { - export import Upload = UploadsAPI.Upload; - export import UploadCreateParams = UploadsAPI.UploadCreateParams; - export import UploadCompleteParams = UploadsAPI.UploadCompleteParams; - export import Parts = PartsAPI.Parts; - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +Uploads.Parts = Parts; + +export declare namespace Uploads { + export { + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export { Parts as Parts, type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/tsconfig.json b/tsconfig.json index 5f99085fc..09a702fca 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -11,7 +11,7 @@ "paths": { "openai/_shims/auto/*": ["src/_shims/auto/*-node"], "openai/*": ["src/*"], - "openai": ["src/index.ts"], + "openai": ["src/index.ts"] }, "noEmit": true, From 362d868426e5777183a52da8df432fa34f722442 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 16:54:12 +0000 Subject: [PATCH 016/246] feat: publish to jsr (#1165) --- .github/workflows/create-releases.yml | 22 +-- .github/workflows/publish-deno.yml | 44 ----- .github/workflows/publish-jsr.yml | 30 ++++ .gitignore | 2 +- README.md | 6 +- bin/publish-jsr | 11 ++ jsr.json | 8 + release-please-config.json | 2 +- scripts/build-deno | 41 +---- scripts/git-publish-deno.sh | 77 --------- scripts/utils/denoify.ts | 226 -------------------------- src/core.ts | 10 +- src/error.ts | 2 +- src/index.ts | 28 ++-- src/streaming.ts | 4 +- tsconfig.deno.json | 11 +- tsconfig.json | 1 + 17 files changed, 88 insertions(+), 437 deletions(-) delete mode 100644 .github/workflows/publish-deno.yml create 
mode 100644 .github/workflows/publish-jsr.yml create mode 100644 bin/publish-jsr create mode 100644 jsr.json delete mode 100755 scripts/git-publish-deno.sh delete mode 100644 scripts/utils/denoify.ts diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index d5ae1f755..3a753b31c 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -22,27 +22,12 @@ jobs: repo: ${{ github.event.repository.full_name }} stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - name: Set up Node if: ${{ steps.release.outputs.releases_created }} uses: actions/setup-node@v3 with: node-version: '18' - - name: Set up Deno - if: ${{ steps.release.outputs.releases_created }} - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - name: Install dependencies if: ${{ steps.release.outputs.releases_created }} run: | @@ -55,11 +40,8 @@ jobs: env: NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} - - name: Publish to Deno + - name: Publish to JSR if: ${{ steps.release.outputs.releases_created }} run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main + bash ./bin/publish-jsr diff --git a/.github/workflows/publish-deno.yml b/.github/workflows/publish-deno.yml deleted file mode 100644 index 894c516a0..000000000 --- a/.github/workflows/publish-deno.yml +++ /dev/null @@ -1,44 +0,0 @@ -# workflow for re-running publishing to Deno in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-deno.yml -name: Publish Deno -on: - 
workflow_dispatch: - -jobs: - publish: - name: publish - runs-on: ubuntu-latest - environment: publish - - steps: - - uses: actions/checkout@v4 - - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - - name: Set up Node - uses: actions/setup-node@v3 - with: - node-version: '18' - - - name: Set up Deno - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - - name: Install dependencies - run: | - yarn install - - - name: Publish to Deno - run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml new file mode 100644 index 000000000..1e46d6bfb --- /dev/null +++ b/.github/workflows/publish-jsr.yml @@ -0,0 +1,30 @@ +# workflow for re-running publishing to JSR in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +name: Publish JSR +on: + workflow_dispatch: + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + environment: publish + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v3 + with: + node-version: '18' + + - name: Install dependencies + run: | + yarn install + + - name: Publish to JSR + run: | + bash ./bin/publish-jsr diff --git a/.gitignore b/.gitignore index 0af7568e5..81c4c41ca 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ yarn-error.log codegen.log Brewfile.lock.json dist -/deno +dist-deno /*.tgz .idea/ tmp diff --git a/README.md b/README.md index 776ea4049..caa3f9d4a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # 
OpenAI Node API Library -[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) +[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) This library provides convenient access to the OpenAI REST API from TypeScript or JavaScript. @@ -14,12 +14,12 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo npm install openai ``` -You can import in Deno via: +You can also import from jsr: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.69.0/mod.ts'; +import OpenAI from 'jsr:@openai/openai'; ``` diff --git a/bin/publish-jsr b/bin/publish-jsr new file mode 100644 index 000000000..1b7365087 --- /dev/null +++ b/bin/publish-jsr @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -eux + +# Build the project +yarn build + +# Navigate to the dist directory +cd dist-deno + +npx jsr publish ${JSR_TOKEN:+"--token=$JSR_TOKEN"} diff --git a/jsr.json b/jsr.json new file mode 100644 index 000000000..fefb5b291 --- /dev/null +++ b/jsr.json @@ -0,0 +1,8 @@ +{ + "name": "@openai/openai", + "version": "4.47.1", + "exports": "./index.ts", + "publish": { + "exclude": ["!."] + } +} diff --git a/release-please-config.json b/release-please-config.json index 0a9347796..377a76e99 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,6 @@ "extra-files": [ "src/version.ts", "README.md", - "scripts/build-deno" + "jsr.json" ] } diff --git a/scripts/build-deno b/scripts/build-deno index be17942df..7d542cf24 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -4,47 +4,16 @@ set -exuo pipefail cd "$(dirname "$0")/.." 
-rm -rf deno; mkdir deno -cp -rp src/* deno +rm -rf dist-deno; mkdir dist-deno +cp -rp src/* jsr.json dist-deno -# x-release-please-start-version -cat << EOF > deno/README.md -# OpenAI Node API Library - Deno build - -This is a build produced from https://github.com/openai/openai-node – please go there to read the source and docs, file issues, etc. - -Usage: - -\`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.69.0/mod.ts"; - -const client = new OpenAI(); -\`\`\` - -Note that in most Deno environments, you can also do this: - -\`\`\`ts -import OpenAI from "npm:openai"; -\`\`\` -EOF -# x-release-please-end - -rm deno/_shims/auto/*-node.ts -for dir in deno/_shims deno/_shims/auto; do +rm dist-deno/_shims/auto/*-node.ts +for dir in dist-deno/_shims dist-deno/_shims/auto; do rm "${dir}"/*.{d.ts,js,mjs} for file in "${dir}"/*-deno.ts; do mv -- "$file" "${file%-deno.ts}.ts" done done for file in LICENSE CHANGELOG.md; do - if [ -e "${file}" ]; then cp "${file}" deno; fi + if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done -npm exec ts-node -T -- scripts/utils/denoify.ts -deno fmt deno -deno check deno/mod.ts -if [ -e deno_tests ]; then - deno test deno_tests --allow-env -fi - -# make sure that nothing crashes when we load the Deno module -(cd deno && deno run mod.ts) diff --git a/scripts/git-publish-deno.sh b/scripts/git-publish-deno.sh deleted file mode 100755 index 701db735e..000000000 --- a/scripts/git-publish-deno.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -set -exuo pipefail - -cd "$(dirname "$0")/.." - -# This script pushes the contents of the `deno` directory to the `deno` branch, -# and creates a `vx.x.x-deno` tag, so that Deno users can -# import OpenAI from "https://raw.githubusercontent.com/openai/openai-node/vx.x.x-deno/mod.ts" - -# It's also possible to publish to deno.land. 
You can do this by: -# - Creating a separate GitHub repo -# - Add the deno.land webhook to the repo as described at https://deno.com/add_module -# - Set the following environment variables when running this script: -# - DENO_PUSH_REMOTE_URL - the remote url of the separate GitHub repo -# - DENO_PUSH_BRANCH - the branch you want to push to in that repo (probably `main`) -# - DENO_MAIN_BRANCH - the branch you want as the main branch in that repo (probably `main`, sometimes `master`) -# - DENO_PUSH_VERSION - defaults to version in package.json -# - DENO_PUSH_RELEASE_TAG - defaults to v$DENO_PUSH_VERSION-deno - -die () { - echo >&2 "$@" - exit 1 -} - -# Allow caller to set the following environment variables, but provide defaults -# if unset -# : "${FOO:=bar}" sets FOO=bar unless it's set and non-empty -# https://stackoverflow.com/questions/307503/whats-a-concise-way-to-check-that-environment-variables-are-set-in-a-unix-shell -# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html - -: "${DENO_PUSH_VERSION:=$(node -p 'require("./package.json").version')}" -: "${DENO_PUSH_BRANCH:=deno}" -: "${DENO_MAIN_BRANCH:=main}" -: "${DENO_PUSH_REMOTE_URL:=$(git remote get-url origin)}" -: "${DENO_GIT_USER_NAME:="Stainless Bot"}" -: "${DENO_GIT_USER_EMAIL:="bot@stainlessapi.com"}" -if [[ $DENO_PUSH_BRANCH = "deno" ]]; then - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION-deno"}" -else - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION"}" -fi - -if [ ! -e deno ]; then ./scripts/build; fi - -# We want to commit and push a branch where everything inside the deno -# directory is at root level in the branch. - -# We can do this by temporarily creating a git repository inside deno, -# committing files to the branch, and pushing it to the remote. 
- -cd deno -rm -rf .git -git init -b "$DENO_MAIN_BRANCH" -git remote add origin "$DENO_PUSH_REMOTE_URL" -if git fetch origin "$DENO_PUSH_RELEASE_TAG"; then - die "Tag $DENO_PUSH_RELEASE_TAG already exists" -fi -if git fetch origin "$DENO_PUSH_BRANCH"; then - # the branch already exists on the remote; "check out" the branch without - # changing files in the working directory - git branch "$DENO_PUSH_BRANCH" -t origin/"$DENO_PUSH_BRANCH" - git symbolic-ref HEAD refs/heads/"$DENO_PUSH_BRANCH" - git reset -else - # the branch doesn't exist on the remote yet - git checkout -b "$DENO_PUSH_BRANCH" -fi - -git config user.email "$DENO_GIT_USER_EMAIL" -git config user.name "$DENO_GIT_USER_NAME" - -git add . -git commit -m "chore(deno): release $DENO_PUSH_VERSION" -git tag -a "$DENO_PUSH_RELEASE_TAG" -m "release $DENO_PUSH_VERSION" -git push --tags --set-upstream origin "$DENO_PUSH_BRANCH" -rm -rf .git diff --git a/scripts/utils/denoify.ts b/scripts/utils/denoify.ts deleted file mode 100644 index 52705802a..000000000 --- a/scripts/utils/denoify.ts +++ /dev/null @@ -1,226 +0,0 @@ -import path from 'path'; -import * as tm from 'ts-morph'; -import { name as pkgName } from '../../package.json'; -import fs from 'fs'; - -const rootDir = path.resolve(__dirname, '../..'); -const denoDir = path.join(rootDir, 'deno'); -const tsConfigFilePath = path.join(rootDir, 'tsconfig.deno.json'); - -async function denoify() { - const project = new tm.Project({ tsConfigFilePath }); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - - let addedBuffer = false, - addedProcess = false; - file.forEachDescendant((node) => { - switch (node.getKind()) { - case tm.ts.SyntaxKind.ExportDeclaration: { - const decl: tm.ExportDeclaration = node as any; - if (decl.isTypeOnly()) return; - for (const named of decl.getNamedExports()) { - // Convert `export { Foo } from './foo.ts'` - // to `export { type Foo } from './foo.ts'` - // if `./foo.ts` only 
exports types for `Foo` - if (!named.isTypeOnly() && !hasValueDeclarations(named)) { - named.replaceWithText(`type ${named.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.ImportEqualsDeclaration: { - const decl: tm.ImportEqualsDeclaration = node as any; - if (decl.isTypeOnly()) return; - - const ref = decl.getModuleReference(); - if (!hasValueDeclarations(ref)) { - const params = isBuiltinType(ref.getType()) ? [] : ref.getType().getTypeArguments(); - if (params.length) { - const paramsStr = params.map((p: tm.TypeParameter) => p.getText()).join(', '); - const bindingsStr = params - .map((p: tm.TypeParameter) => p.getSymbol()?.getName() || p.getText()) - .join(', '); - decl.replaceWithText( - `export type ${decl.getName()}<${paramsStr}> = ${ref.getText()}<${bindingsStr}>`, - ); - } else { - decl.replaceWithText(`export type ${decl.getName()} = ${ref.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.Identifier: { - const id = node as tm.Identifier; - if (!addedBuffer && id.getText() === 'Buffer') { - addedBuffer = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'Buffer', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - file?.addTypeAlias({ - name: 'Buffer', - type: 'any', - }); - } - if (!addedProcess && id.getText() === 'process') { - addedProcess = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'process', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - } - } - } - }); - } - - await project.save(); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - for (const decl of [...file.getImportDeclarations(), ...file.getExportDeclarations()]) { - const moduleSpecifier = decl.getModuleSpecifier(); - if (!moduleSpecifier) continue; - let specifier = moduleSpecifier.getLiteralValue().replace(/^node:/, ''); - if (!specifier || specifier.startsWith('http')) continue; - - if (nodeStdModules.has(specifier)) { - // convert node builtins to 
deno.land/std - specifier = `https://deno.land/std@0.177.0/node/${specifier}.ts`; - } else if (specifier.startsWith(pkgName + '/')) { - // convert self-referencing module specifiers to relative paths - specifier = file.getRelativePathAsModuleSpecifierTo(denoDir + specifier.substring(pkgName.length)); - } else if (!decl.isModuleSpecifierRelative()) { - specifier = `npm:${specifier}`; - } - - if (specifier.startsWith('./') || specifier.startsWith('../')) { - // there may be CJS directory module specifiers that implicitly resolve - // to /index.ts. Add an explicit /index.ts to the end - const sourceFile = decl.getModuleSpecifierSourceFile(); - if (sourceFile && /\/index\.ts$/.test(sourceFile.getFilePath()) && !/\/mod\.ts$/.test(specifier)) { - if (/\/index(\.ts)?$/.test(specifier)) { - specifier = specifier.replace(/\/index(\.ts)?$/, '/mod.ts'); - } else { - specifier += '/mod.ts'; - } - } - // add explicit .ts file extensions to relative module specifiers - specifier = specifier.replace(/(\.[^./]*)?$/, '.ts'); - } - moduleSpecifier.replaceWithText(JSON.stringify(specifier)); - } - } - - await project.save(); - - await Promise.all( - project.getSourceFiles().map(async (f) => { - const filePath = f.getFilePath(); - if (filePath.endsWith('index.ts')) { - const newPath = filePath.replace(/index\.ts$/, 'mod.ts'); - await fs.promises.rename(filePath, newPath); - } - }), - ); -} - -const nodeStdModules = new Set([ - 'assert', - 'assertion_error', - 'async_hooks', - 'buffer', - 'child_process', - 'cluster', - 'console', - 'constants', - 'crypto', - 'dgram', - 'diagnostics_channel', - 'dns', - 'domain', - 'events', - 'fs', - 'global', - 'http', - 'http2', - 'https', - 'inspector', - 'module_all', - 'module_esm', - 'module', - 'net', - 'os', - 'path', - 'perf_hooks', - 'process', - 'punycode', - 'querystring', - 'readline', - 'repl', - 'stream', - 'string_decoder', - 'sys', - 'timers', - 'tls', - 'tty', - 'upstream_modules', - 'url', - 'util', - 'v8', - 'vm', - 'wasi', - 
'worker_threads', - 'zlib', -]); - -const typeDeclarationKinds = new Set([ - tm.ts.SyntaxKind.InterfaceDeclaration, - tm.ts.SyntaxKind.ModuleDeclaration, - tm.ts.SyntaxKind.TypeAliasDeclaration, -]); - -const builtinTypeNames = new Set(['Array', 'Set', 'Map', 'Record', 'Promise']); - -function isBuiltinType(type: tm.Type): boolean { - const symbol = type.getSymbol(); - return ( - symbol != null && - builtinTypeNames.has(symbol.getName()) && - symbol.getDeclarations().some((d) => d.getSourceFile().getFilePath().includes('node_modules/typescript')) - ); -} - -function hasValueDeclarations(nodes?: tm.Node): boolean; -function hasValueDeclarations(nodes?: tm.Node[]): boolean; -function hasValueDeclarations(nodes?: tm.Node | tm.Node[]): boolean { - if (nodes && !Array.isArray(nodes)) { - return ( - !isBuiltinType(nodes.getType()) && hasValueDeclarations(nodes.getType().getSymbol()?.getDeclarations()) - ); - } - return nodes ? - nodes.some((n) => { - const parent = n.getParent(); - return ( - !typeDeclarationKinds.has(n.getKind()) && - // sometimes the node will be the right hand side of a type alias - (!parent || !typeDeclarationKinds.has(parent.getKind())) - ); - }) - : false; -} - -denoify(); diff --git a/src/core.ts b/src/core.ts index 9d90178ab..0c8e69ffc 100644 --- a/src/core.ts +++ b/src/core.ts @@ -431,7 +431,7 @@ export abstract class APIClient { error: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { + ): APIError { return APIError.generate(status, error, message, headers); } @@ -703,9 +703,9 @@ export abstract class AbstractPage implements AsyncIterable { return await this.#client.requestAPIList(this.constructor as any, nextOptions); } - async *iterPages() { + async *iterPages(): AsyncGenerator { // eslint-disable-next-line @typescript-eslint/no-this-alias - let page: AbstractPage = this; + let page: this = this; yield page; while (page.hasNextPage()) { page = await page.getNextPage(); @@ -713,7 +713,7 @@ export abstract 
class AbstractPage implements AsyncIterable { } } - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { for await (const page of this.iterPages()) { for (const item of page.getPaginatedItems()) { yield item; @@ -762,7 +762,7 @@ export class PagePromise< * console.log(item) * } */ - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { const page = await this; for await (const item of page) { yield item; diff --git a/src/error.ts b/src/error.ts index 87eeea046..72b4f7bfd 100644 --- a/src/error.ts +++ b/src/error.ts @@ -59,7 +59,7 @@ export class APIError extends OpenAIError { errorResponse: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { + ): APIError { if (!status) { return new APIConnectionError({ message, cause: castToError(errorResponse) }); } diff --git a/src/index.ts b/src/index.ts index c1506997b..33b0848e4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -305,19 +305,21 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export const OpenAIError = Errors.OpenAIError; -export const APIError = Errors.APIError; -export const APIConnectionError = Errors.APIConnectionError; -export const APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; -export const APIUserAbortError = Errors.APIUserAbortError; -export const NotFoundError = Errors.NotFoundError; -export const ConflictError = Errors.ConflictError; -export const RateLimitError = Errors.RateLimitError; -export const BadRequestError = Errors.BadRequestError; -export const AuthenticationError = Errors.AuthenticationError; -export const InternalServerError = Errors.InternalServerError; -export const PermissionDeniedError = Errors.PermissionDeniedError; -export const UnprocessableEntityError = Errors.UnprocessableEntityError; +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + 
ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from './error'; export import toFile = Uploads.toFile; export import fileFromPath = Uploads.fileFromPath; diff --git a/src/streaming.ts b/src/streaming.ts index 597ee89fa..b48f3ff1d 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -22,7 +22,7 @@ export class Stream implements AsyncIterable { this.controller = controller; } - static fromSSEResponse(response: Response, controller: AbortController) { + static fromSSEResponse(response: Response, controller: AbortController): Stream { let consumed = false; async function* iterator(): AsyncIterator { @@ -90,7 +90,7 @@ export class Stream implements AsyncIterable { * Generates a Stream from a newline-separated ReadableStream * where each item is a JSON value. */ - static fromReadableStream(readableStream: ReadableStream, controller: AbortController) { + static fromReadableStream(readableStream: ReadableStream, controller: AbortController): Stream { let consumed = false; async function* iterLines(): AsyncGenerator { diff --git a/tsconfig.deno.json b/tsconfig.deno.json index d0e9473d9..849e070db 100644 --- a/tsconfig.deno.json +++ b/tsconfig.deno.json @@ -1,19 +1,14 @@ { "extends": "./tsconfig.json", - "include": ["deno"], + "include": ["dist-deno"], "exclude": [], "compilerOptions": { - "rootDir": "./deno", + "rootDir": "./dist-deno", "lib": ["es2020", "DOM"], - "paths": { - "openai/_shims/auto/*": ["deno/_shims/auto/*-deno"], - "openai/*": ["deno/*"], - "openai": ["deno/index.ts"], - }, "noEmit": true, "declaration": true, "declarationMap": true, - "outDir": "deno", + "outDir": "dist-deno", "pretty": true, "sourceMap": true } diff --git a/tsconfig.json b/tsconfig.json index 09a702fca..33767f7b1 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -32,6 +32,7 @@ "noUncheckedIndexedAccess": true, "noImplicitOverride": true, "noPropertyAccessFromIndexSignature": true, + 
"isolatedModules": true, "skipLibCheck": true } From 1c1417ef15f0f7d718773447e338b429c7871723 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 1 Nov 2024 16:57:31 +0000 Subject: [PATCH 017/246] chore(internal): fix isolated modules exports --- src/resources/beta/chat/completions.ts | 34 ++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 03ea0aab5..c9360a95c 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -3,29 +3,14 @@ import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingRunner, ChatCompletionStreamingFunctionRunnerParams, } from '../../../lib/ChatCompletionStreamingRunner'; -export { - ChatCompletionStreamingRunner, - ChatCompletionStreamingFunctionRunnerParams, -} from '../../../lib/ChatCompletionStreamingRunner'; import { BaseFunctionsArgs } from '../../../lib/RunnableFunction'; -export { - RunnableFunction, - RunnableFunctions, - RunnableFunctionWithParse, - RunnableFunctionWithoutParse, - ParsingFunction, - ParsingToolFunction, -} from '../../../lib/RunnableFunction'; import { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner'; import { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; -export { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; import { ChatCompletionStream, type ChatCompletionStreamParams } from 
'../../../lib/ChatCompletionStream'; import { ChatCompletion, @@ -34,7 +19,26 @@ import { ChatCompletionMessageToolCall, } from '../../chat/completions'; import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; + +export { + ChatCompletionStreamingRunner, + type ChatCompletionStreamingFunctionRunnerParams, +} from '../../../lib/ChatCompletionStreamingRunner'; +export { + type RunnableFunction, + type RunnableFunctions, + type RunnableFunctionWithParse, + type RunnableFunctionWithoutParse, + ParsingFunction, + ParsingToolFunction, +} from '../../../lib/RunnableFunction'; +export { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +export { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export { + ChatCompletionRunner, + type ChatCompletionFunctionRunnerParams, +} from '../../../lib/ChatCompletionRunner'; export interface ParsedFunction extends ChatCompletionMessageToolCall.Function { parsed_arguments?: unknown; From f5260ff160cec852f58ff92300d473c05b53f02e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 16:58:17 +0000 Subject: [PATCH 018/246] release: 4.70.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 65aac9575..d07bcaba7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.69.0" + ".": "4.70.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b3b52aaa3..3f355b2b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.70.0 (2024-11-01) + +Full Changelog: 
[v4.69.0...v4.70.0](https://github.com/openai/openai-node/compare/v4.69.0...v4.70.0) + +### Features + +* publish to jsr ([#1165](https://github.com/openai/openai-node/issues/1165)) ([5aa93a7](https://github.com/openai/openai-node/commit/5aa93a7fe704ef1ad077787852db38dc29104534)) + + +### Chores + +* **internal:** fix isolated modules exports ([9cd1958](https://github.com/openai/openai-node/commit/9cd19584dcc6f4004ea1adcee917aa88a37d5f1c)) + + +### Refactors + +* use type imports for type-only imports ([#1159](https://github.com/openai/openai-node/issues/1159)) ([07bbaf6](https://github.com/openai/openai-node/commit/07bbaf6ecac9a5e36471a35488020853ddf9214f)) + ## 4.69.0 (2024-10-30) Full Changelog: [v4.68.4...v4.69.0](https://github.com/openai/openai-node/compare/v4.68.4...v4.69.0) diff --git a/package.json b/package.json index 9e32feabb..f200fdb53 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.69.0", + "version": "4.70.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index be250f2d6..f298c56c6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.69.0'; // x-release-please-version +export const VERSION = '4.70.0'; // x-release-please-version From 9180285caf1aec6da05aa4a0058db39bd875cb60 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 19:29:35 +0000 Subject: [PATCH 019/246] fix: don't require deno to run build-deno (#1167) --- scripts/build | 2 +- src/streaming.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/build b/scripts/build index b4d686af5..0246c90e3 100755 --- a/scripts/build +++ b/scripts/build @@ -50,7 +50,7 @@ node scripts/utils/postprocess-files.cjs (cd dist && node -e 'require("openai")') (cd dist && node -e 'import("openai")' --input-type=module) -if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && 
command -v deno &> /dev/null && [ -e ./scripts/build-deno ] +if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && [ -e ./scripts/build-deno ] then ./scripts/build-deno fi diff --git a/src/streaming.ts b/src/streaming.ts index b48f3ff1d..2891e6ac3 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -2,7 +2,7 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; import { LineDecoder } from './internal/decoders/line'; -import { APIError } from 'openai/error'; +import { APIError } from './error'; type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; From 9b569856e1f39156cebbb939b7b7149b0f494c88 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:29:56 +0000 Subject: [PATCH 020/246] release: 4.70.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d07bcaba7..f458b24a5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.0" + ".": "4.70.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f355b2b4..7525af900 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.70.1 (2024-11-01) + +Full Changelog: [v4.70.0...v4.70.1](https://github.com/openai/openai-node/compare/v4.70.0...v4.70.1) + +### Bug Fixes + +* don't require deno to run build-deno ([#1167](https://github.com/openai/openai-node/issues/1167)) ([9d857bc](https://github.com/openai/openai-node/commit/9d857bc531a0bb3939f7660e49b31ccc38f60dd3)) + ## 4.70.0 (2024-11-01) Full Changelog: [v4.69.0...v4.70.0](https://github.com/openai/openai-node/compare/v4.69.0...v4.70.0) diff --git a/package.json b/package.json index f200fdb53..7e14e3b3b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { 
"name": "openai", - "version": "4.70.0", + "version": "4.70.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f298c56c6..654369eef 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.0'; // x-release-please-version +export const VERSION = '4.70.1'; // x-release-please-version From a3df48926c506bfe649336adcf14011e20f539b9 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 15:36:55 -0400 Subject: [PATCH 021/246] fix: skip deno ecosystem test --- ecosystem-tests/cli.ts | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index c03ea668a..b0ff712f1 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -95,15 +95,16 @@ const projectRunners = { await run('bun', ['test']); } }, - deno: async () => { - // we don't need to explicitly install the package here - // because our deno setup relies on `rootDir/deno` to exist - // which is an artifact produced from our build process - await run('deno', ['task', 'install']); - await run('deno', ['task', 'check']); - - if (state.live) await run('deno', ['task', 'test']); - }, + // Temporarily comment this out until we can test with JSR transformations end-to-end. 
+ // deno: async () => { + // // we don't need to explicitly install the package here + // // because our deno setup relies on `rootDir/deno` to exist + // // which is an artifact produced from our build process + // await run('deno', ['task', 'install']); + // await run('deno', ['task', 'check']); + + // if (state.live) await run('deno', ['task', 'test']); + // }, }; let projectNames = Object.keys(projectRunners) as Array; From dfd4bbe7412bd41622058434f193db4ad1672bbe Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 15:39:43 -0400 Subject: [PATCH 022/246] fix: add permissions to github workflow --- .github/workflows/create-releases.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index 3a753b31c..19b7dd831 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -12,6 +12,9 @@ jobs: if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' runs-on: ubuntu-latest environment: publish + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 From 53f6ecc9e333a6e9adac2179efecdfe3f2ff6d8a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:40:03 +0000 Subject: [PATCH 023/246] release: 4.70.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f458b24a5..0d068338b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.1" + ".": "4.70.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7525af900..09e00049c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.70.2 (2024-11-01) + +Full Changelog: 
[v4.70.1...v4.70.2](https://github.com/openai/openai-node/compare/v4.70.1...v4.70.2) + +### Bug Fixes + +* add permissions to github workflow ([ee75e00](https://github.com/openai/openai-node/commit/ee75e00b0fbf82553b219ee8948a8077e9c26a24)) +* skip deno ecosystem test ([5b181b0](https://github.com/openai/openai-node/commit/5b181b01b62139f8da35d426914c82b8425af141)) + ## 4.70.1 (2024-11-01) Full Changelog: [v4.70.0...v4.70.1](https://github.com/openai/openai-node/compare/v4.70.0...v4.70.1) diff --git a/package.json b/package.json index 7e14e3b3b..cd5fbe3f8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.70.1", + "version": "4.70.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 654369eef..f4beff9fa 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.1'; // x-release-please-version +export const VERSION = '4.70.2'; // x-release-please-version From 5d6188d685f371b219456f5b5251e0f33cc3fd27 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 23:39:30 -0400 Subject: [PATCH 024/246] fix: change streaming helper imports to be relative --- src/lib/AbstractChatCompletionRunner.ts | 10 ++++----- src/lib/AssistantStream.ts | 26 ++++++++++++------------ src/lib/ChatCompletionRunner.ts | 6 +++--- src/lib/ChatCompletionStream.ts | 16 +++++++-------- src/lib/ChatCompletionStreamingRunner.ts | 8 ++++---- src/lib/EventStream.ts | 2 +- src/lib/chatCompletionUtils.ts | 2 +- src/lib/parser.ts | 2 +- 8 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index e943a4e4f..406f5a431 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -1,13 +1,13 @@ -import * as Core from 'openai/core'; -import { type CompletionUsage } from 
'openai/resources/completions'; +import * as Core from '../core'; +import { type CompletionUsage } from '../resources/completions'; import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams, type ChatCompletionTool, -} from 'openai/resources/chat/completions'; -import { OpenAIError } from 'openai/error'; +} from '../resources/chat/completions'; +import { OpenAIError } from '../error'; import { type RunnableFunction, isRunnableFunctionWithParse, @@ -23,7 +23,7 @@ import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatComp import { BaseEvents, EventStream } from './EventStream'; import { ParsedChatCompletion } from '../resources/beta/chat/completions'; import OpenAI from '../index'; -import { isAutoParsableTool, parseChatCompletion } from 'openai/lib/parser'; +import { isAutoParsableTool, parseChatCompletion } from '../lib/parser'; const DEFAULT_MAX_CHAT_COMPLETIONS = 10; export interface RunnerOptions extends Core.RequestOptions { diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index c826c910e..caf68e7dd 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -8,9 +8,9 @@ import { TextDelta, MessageDelta, MessageContent, -} from 'openai/resources/beta/threads/messages'; -import * as Core from 'openai/core'; -import { RequestOptions } from 'openai/core'; +} from '../resources/beta/threads/messages'; +import * as Core from '../core'; +import { RequestOptions } from '../core'; import { Run, RunCreateParamsBase, @@ -18,18 +18,18 @@ import { Runs, RunSubmitToolOutputsParamsBase, RunSubmitToolOutputsParamsStreaming, -} from 'openai/resources/beta/threads/runs/runs'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import { APIUserAbortError, OpenAIError } from 'openai/error'; +} from '../resources/beta/threads/runs/runs'; +import { type ReadableStream } from '../_shims/index'; +import { 
Stream } from '../streaming'; +import { APIUserAbortError, OpenAIError } from '../error'; import { AssistantStreamEvent, MessageStreamEvent, RunStepStreamEvent, RunStreamEvent, -} from 'openai/resources/beta/assistants'; -import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; -import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; +} from '../resources/beta/assistants'; +import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from '../resources/beta/threads/runs/steps'; +import { ThreadCreateAndRunParamsBase, Threads } from '../resources/beta/threads/threads'; import { BaseEvents, EventStream } from './EventStream'; export interface AssistantStreamEvents extends BaseEvents { @@ -192,7 +192,7 @@ export class AssistantStream runs: Runs, params: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runToolAssistantStream(threadId, runId, runs, params, { @@ -238,7 +238,7 @@ export class AssistantStream params: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._threadAssistantStream(params, thread, { @@ -254,7 +254,7 @@ export class AssistantStream runs: Runs, params: RunCreateParamsBaseStream, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runAssistantStream(threadId, runs, params, { diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index 0b962a110..9e68e6671 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -1,7 +1,7 @@ import { type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { type 
RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction'; import { AbstractChatCompletionRunner, @@ -9,8 +9,8 @@ import { RunnerOptions, } from './AbstractChatCompletionRunner'; import { isAssistantMessage } from './chatCompletionUtils'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents { content: (content: string) => void; diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index e3661c8c1..a88f8a23b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -1,10 +1,10 @@ -import * as Core from 'openai/core'; +import * as Core from '../core'; import { OpenAIError, APIUserAbortError, LengthFinishReasonError, ContentFilterFinishReasonError, -} from 'openai/error'; +} from '../error'; import { ChatCompletionTokenLogprob, type ChatCompletion, @@ -12,15 +12,15 @@ import { type ChatCompletionCreateParams, type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents, } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import OpenAI from 'openai/index'; -import { ParsedChatCompletion } from 'openai/resources/beta/chat/completions'; +import { type ReadableStream } from '../_shims/index'; +import { Stream } from '../streaming'; +import OpenAI from '../index'; +import { ParsedChatCompletion } from '../resources/beta/chat/completions'; import { AutoParseableResponseFormat, hasAutoParseableInput, @@ -28,7 +28,7 @@ import { isAutoParsableTool, maybeParseChatCompletion, shouldParseToolCall, -} from 
'openai/lib/parser'; +} from '../lib/parser'; import { partialParse } from '../_vendor/partial-json-parser/parser'; export interface ContentDeltaEvent { diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts index ea6c74116..ba0c6496f 100644 --- a/src/lib/ChatCompletionStreamingRunner.ts +++ b/src/lib/ChatCompletionStreamingRunner.ts @@ -1,13 +1,13 @@ import { type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; +import { type ReadableStream } from '../_shims/index'; import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction'; import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { content: (contentDelta: string, contentSnapshot: string) => void; diff --git a/src/lib/EventStream.ts b/src/lib/EventStream.ts index a18c771dd..d3f485e9d 100644 --- a/src/lib/EventStream.ts +++ b/src/lib/EventStream.ts @@ -1,4 +1,4 @@ -import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { APIUserAbortError, OpenAIError } from '../error'; export class EventStream { controller: AbortController = new AbortController(); diff --git a/src/lib/chatCompletionUtils.ts b/src/lib/chatCompletionUtils.ts index a0d9099de..7e9f8a093 100644 --- a/src/lib/chatCompletionUtils.ts +++ b/src/lib/chatCompletionUtils.ts @@ -3,7 +3,7 @@ import { type ChatCompletionFunctionMessageParam, type ChatCompletionMessageParam, type ChatCompletionToolMessageParam, -} from 
'openai/resources'; +} from '../resources'; export const isAssistantMessage = ( message: ChatCompletionMessageParam | null | undefined, diff --git a/src/lib/parser.ts b/src/lib/parser.ts index 8bf2a3a36..f2678e312 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -13,7 +13,7 @@ import { ParsedFunctionToolCall, } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; -import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from 'openai/error'; +import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams From e0b675f7ee8202a7522be588f4bc297553f5fb3a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:07:32 +0000 Subject: [PATCH 025/246] release: 4.70.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0d068338b..6c3b02fed 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.2" + ".": "4.70.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 09e00049c..abe273b81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.70.3 (2024-11-04) + +Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3) + +### Bug Fixes + +* change streaming helper imports to be relative ([e73b7cf](https://github.com/openai/openai-node/commit/e73b7cf84272bd02a39a67795d49db23db2d970f)) + ## 4.70.2 (2024-11-01) Full Changelog: [v4.70.1...v4.70.2](https://github.com/openai/openai-node/compare/v4.70.1...v4.70.2) diff --git a/package.json b/package.json index cd5fbe3f8..e9d130380 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "openai", - "version": "4.70.2", + "version": "4.70.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f4beff9fa..04f8abf02 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.2'; // x-release-please-version +export const VERSION = '4.70.3'; // x-release-please-version From 840179f42eeffc8e533f4b7b2a38e36c593ad8e5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:43:08 +0000 Subject: [PATCH 026/246] feat(api): add support for predicted outputs (#1172) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 2 + src/resources/audio/speech.ts | 4 +- src/resources/audio/transcriptions.ts | 2 +- src/resources/audio/translations.ts | 2 +- src/resources/beta/assistants.ts | 36 +++++++-------- src/resources/beta/threads/messages.ts | 4 +- src/resources/beta/threads/runs/runs.ts | 18 ++++---- src/resources/beta/threads/runs/steps.ts | 8 ++-- src/resources/beta/threads/threads.ts | 10 ++--- .../beta/vector-stores/file-batches.ts | 4 +- src/resources/beta/vector-stores/files.ts | 4 +- .../beta/vector-stores/vector-stores.ts | 4 +- src/resources/chat/chat.ts | 2 + src/resources/chat/completions.ts | 44 +++++++++++++++---- src/resources/chat/index.ts | 1 + src/resources/completions.ts | 24 +++++++--- src/resources/embeddings.ts | 6 +-- src/resources/files.ts | 17 ++++--- src/resources/fine-tuning/jobs/jobs.ts | 2 +- src/resources/images.ts | 6 +-- src/resources/moderations.ts | 2 +- src/resources/uploads/uploads.ts | 2 +- tests/api-resources/chat/completions.test.ts | 1 + tests/api-resources/files.test.ts | 5 ++- 26 files changed, 133 insertions(+), 80 deletions(-) diff --git a/.stats.yml b/.stats.yml index 39413df44..f368bc881 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml diff --git a/api.md b/api.md index da60f65bd..465730de8 100644 --- a/api.md +++ b/api.md @@ -48,6 +48,7 @@ Types: - ChatCompletionMessageToolCall - ChatCompletionModality - ChatCompletionNamedToolChoice +- ChatCompletionPredictionContent - ChatCompletionRole - ChatCompletionStreamOptions - ChatCompletionSystemMessageParam diff --git a/src/index.ts b/src/index.ts index 33b0848e4..c3299e00d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -87,6 +87,7 @@ import { ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -379,6 +380,7 @@ export declare namespace OpenAI { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index da99bf649..1cda80f79 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -22,7 +22,7 @@ export interface SpeechCreateParams { input: string; /** - * One of the available [TTS models](https://platform.openai.com/docs/models/tts): + * One of the available [TTS models](https://platform.openai.com/docs/models#tts): * `tts-1` or `tts-1-hd` */ model: (string & {}) | SpeechModel; @@ -31,7 +31,7 @@ export 
interface SpeechCreateParams { * The voice to use when generating the audio. Supported voices are `alloy`, * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are * available in the - * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). */ voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index dd4258787..0b6da4620 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -174,7 +174,7 @@ export interface TranscriptionCreateParams< /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should match the audio language. */ prompt?: string; diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index b98a95044..c6bf7c870 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -76,7 +76,7 @@ export interface TranslationCreateParams< /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should be in English. */ prompt?: string; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 6d48089ce..0e657b1d4 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -121,8 +121,8 @@ export interface Assistant { * ID of the model to use. 
You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: string; @@ -145,8 +145,8 @@ export interface Assistant { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -620,7 +620,7 @@ export namespace AssistantStreamEvent { /** * Occurs when an - * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. + * [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. * This can happen due to an internal server error or a timeout. */ export interface ErrorEvent { @@ -663,7 +663,7 @@ export namespace FileSearchTool { * * Note that the file search tool may output fewer than `max_num_results` results. * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ max_num_results?: number; @@ -673,7 +673,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0. 
* * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ ranking_options?: FileSearch.RankingOptions; @@ -685,7 +685,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ export interface RankingOptions { @@ -1100,8 +1100,8 @@ export interface AssistantCreateParams { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | ChatAPI.ChatModel; @@ -1131,8 +1131,8 @@ export interface AssistantCreateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1277,8 +1277,8 @@ export interface AssistantUpdateParams { * ID of the model to use. 
You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model?: string; @@ -1289,8 +1289,8 @@ export interface AssistantUpdateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1383,8 +1383,8 @@ export interface AssistantListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index af7977667..8124f56cd 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -704,8 +704,8 @@ export interface MessageListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. 
For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 83a447a91..814ad3e89 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -435,7 +435,7 @@ export interface Run { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls: boolean; @@ -448,8 +448,8 @@ export interface Run { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -660,7 +660,7 @@ export interface RunCreateParamsBase { * search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. 
*/ include?: Array; @@ -721,15 +721,15 @@ export interface RunCreateParamsBase { /** * Body param: Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** * Body param: Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -909,8 +909,8 @@ export interface RunListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index b10bcb868..6c6722b62 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -705,7 +705,7 @@ export interface StepRetrieveParams { * to fetch the file search result content. 
* * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array; @@ -715,8 +715,8 @@ export interface StepListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -726,7 +726,7 @@ export interface StepListParams extends CursorPageParams { * to fetch the file search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 899645508..453d8fa10 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -176,8 +176,8 @@ export class Threads extends APIResource { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -565,15 +565,15 @@ export interface ThreadCreateAndRunParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 533e6ce03..2c47cb9c2 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -276,8 +276,8 @@ export interface FileBatchListFilesParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. 
*/ before?: string; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index a263a0491..1fda9a99b 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -268,8 +268,8 @@ export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 4d1e83dce..35ad8c369 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -372,8 +372,8 @@ export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. 
*/ before?: string; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index afe4dd08e..351430f8c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -23,6 +23,7 @@ import { ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -101,6 +102,7 @@ export declare namespace Chat { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 430e52bb2..9d344744a 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -469,7 +469,7 @@ export namespace ChatCompletionContentPartImage { /** * Specifies the detail level of the image. Learn more in the - * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + * [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). */ detail?: 'auto' | 'low' | 'high'; } @@ -687,6 +687,25 @@ export namespace ChatCompletionNamedToolChoice { } } +/** + * Static predicted output content, such as the content of a text file that is + * being regenerated. + */ +export interface ChatCompletionPredictionContent { + /** + * The content that should be matched when generating a model response. If + * generated tokens would match this content, the entire model response can be + * returned much more quickly. 
+ */ + content: string | Array; + + /** + * The type of the predicted content you want to provide. This type is currently + * always `content`. + */ + type: 'content'; +} + /** * The role of the author of a message */ @@ -855,7 +874,7 @@ export interface ChatCompletionCreateParamsBase { /** * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) * table for details on which models work with the Chat API. */ model: (string & {}) | ChatAPI.ChatModel; @@ -872,7 +891,7 @@ export interface ChatCompletionCreateParamsBase { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; @@ -963,25 +982,31 @@ export interface ChatCompletionCreateParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; + /** + * Static predicted output content, such as the content of a text file that is + * being regenerated. + */ + prediction?: ChatCompletionPredictionContent | null; + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. 
* - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; /** * An object specifying the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1107,7 +1132,7 @@ export interface ChatCompletionCreateParamsBase { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
*/ user?: string; } @@ -1204,6 +1229,7 @@ export declare namespace Completions { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index d9366bf74..262bf75a2 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -20,6 +20,7 @@ export { type ChatCompletionMessageToolCall, type ChatCompletionModality, type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, type ChatCompletionRole, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 94c4581a1..be75a46f0 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -137,6 +137,12 @@ export namespace CompletionUsage { * Breakdown of tokens used in a completion. */ export interface CompletionTokensDetails { + /** + * When using Predicted Outputs, the number of tokens in the prediction that + * appeared in the completion. + */ + accepted_prediction_tokens?: number; + /** * Audio input tokens generated by the model. */ @@ -146,6 +152,14 @@ export namespace CompletionUsage { * Tokens generated by the model for reasoning. */ reasoning_tokens?: number; + + /** + * When using Predicted Outputs, the number of tokens in the prediction that did + * not appear in the completion. However, like reasoning tokens, these tokens are + * still counted in the total completion tokens for purposes of billing, output, + * and context window limits. 
+ */ + rejected_prediction_tokens?: number; } /** @@ -171,8 +185,8 @@ export interface CompletionCreateParamsBase { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002'; @@ -209,7 +223,7 @@ export interface CompletionCreateParamsBase { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; @@ -264,7 +278,7 @@ export interface CompletionCreateParamsBase { * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; @@ -327,7 +341,7 @@ export interface CompletionCreateParamsBase { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
*/ user?: string; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index e2b35f530..4b1644a68 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -94,8 +94,8 @@ export interface EmbeddingCreateParams { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | EmbeddingModel; @@ -114,7 +114,7 @@ export interface EmbeddingCreateParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } diff --git a/src/resources/files.ts b/src/resources/files.ts index dec815a28..48d8f8747 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -5,7 +5,7 @@ import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as Core from '../core'; -import { Page } from '../pagination'; +import { CursorPage, type CursorPageParams } from '../pagination'; import { type Response } from '../_shims/index'; export class Files extends APIResource { @@ -44,7 +44,7 @@ export class Files extends APIResource { } /** - * Returns a list of files that belong to the user's organization. + * Returns a list of files. 
*/ list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise; list(options?: Core.RequestOptions): Core.PagePromise; @@ -111,10 +111,7 @@ export class Files extends APIResource { } } -/** - * Note: no pagination actually occurs yet, this is for forwards-compatibility. - */ -export class FileObjectsPage extends Page {} +export class FileObjectsPage extends CursorPage {} export type FileContent = string; @@ -213,7 +210,13 @@ export interface FileCreateParams { purpose: FilePurpose; } -export interface FileListParams { +export interface FileListParams extends CursorPageParams { + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; + /** * Only return files with the given purpose. */ diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 275fad869..0c320e028 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -304,7 +304,7 @@ export interface FineTuningJobWandbIntegrationObject { export interface JobCreateParams { /** * The name of the model to fine-tune. You can select one of the - * [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + * [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). */ model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini'; diff --git a/src/resources/images.ts b/src/resources/images.ts index f4d59b941..8e1c6d92e 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -94,7 +94,7 @@ export interface ImageCreateVariationParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } @@ -146,7 +146,7 @@ export interface ImageEditParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } @@ -201,7 +201,7 @@ export interface ImageGenerateParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index cdde12a62..f7b16166d 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -351,7 +351,7 @@ export interface ModerationCreateParams { * The content moderation model you would like to use. Learn more in * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and * learn about available models - * [here](https://platform.openai.com/docs/models/moderation). + * [here](https://platform.openai.com/docs/models#moderation). */ model?: (string & {}) | ModerationModel; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 78fa3a7b5..8491d0fe2 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -25,7 +25,7 @@ export class Uploads extends APIResource { * For certain `purpose`s, the correct `mime_type` must be specified. 
Please refer * to documentation for the supported MIME types for your use case: * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 77d4a251c..180a1d77f 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -39,6 +39,7 @@ describe('resource completions', () => { modalities: ['text', 'audio'], n: 1, parallel_tool_calls: true, + prediction: { content: 'string', type: 'content' }, presence_penalty: -2, response_format: { type: 'text' }, seed: -9007199254740991, diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index bbaa45a65..c907c4987 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -69,7 +69,10 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), + client.files.list( + { after: 'after', limit: 0, order: 'asc', purpose: 'purpose' }, + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); From 35cfdb8d400d90403319418bfe345e0d1bd24be5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:43:34 +0000 Subject: [PATCH 027/246] release: 4.71.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff 
--git a/.release-please-manifest.json b/.release-please-manifest.json index 6c3b02fed..b295c3f54 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.3" + ".": "4.71.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index abe273b81..bb769c53e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.71.0 (2024-11-04) + +Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0) + +### Features + +* **api:** add support for predicted outputs ([#1172](https://github.com/openai/openai-node/issues/1172)) ([08a7bb4](https://github.com/openai/openai-node/commit/08a7bb4d4b751aeed9655bfcb9fa27fc79a767c4)) + ## 4.70.3 (2024-11-04) Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3) diff --git a/package.json b/package.json index e9d130380..501d4f31e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.70.3", + "version": "4.71.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 04f8abf02..273878132 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.3'; // x-release-please-version +export const VERSION = '4.71.0'; // x-release-please-version From f0a1288d37683e8eee7df6a9e5838fbfee35cbe3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 21:19:10 +0000 Subject: [PATCH 028/246] fix: change release please configuration for jsr.json (#1174) --- release-please-config.json | 6 +++++- scripts/build-deno | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/release-please-config.json b/release-please-config.json index 377a76e99..1aa2fb613 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,10 
@@ "extra-files": [ "src/version.ts", "README.md", - "jsr.json" + { + "type": "json", + "path": "jsr.json", + "jsonpath": "$.version" + } ] } diff --git a/scripts/build-deno b/scripts/build-deno index 7d542cf24..4a2000a66 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -14,6 +14,6 @@ for dir in dist-deno/_shims dist-deno/_shims/auto; do mv -- "$file" "${file%-deno.ts}.ts" done done -for file in LICENSE CHANGELOG.md; do +for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done From f41f1811c90b3e3b54a4356c8e2ca39189f4ce66 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 05:06:50 +0000 Subject: [PATCH 029/246] release: 4.71.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 6 ++++-- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b295c3f54..6fbbb03de 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.71.0" + ".": "4.71.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index bb769c53e..1e74a8ee3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.71.1 (2024-11-06) + +Full Changelog: [v4.71.0...v4.71.1](https://github.com/openai/openai-node/compare/v4.71.0...v4.71.1) + +### Bug Fixes + +* change release please configuration for jsr.json ([#1174](https://github.com/openai/openai-node/issues/1174)) ([c39efba](https://github.com/openai/openai-node/commit/c39efba812209c8906315596cc0a56e54ae8590a)) + ## 4.71.0 (2024-11-04) Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0) diff --git a/jsr.json b/jsr.json index fefb5b291..48a838612 100644 --- a/jsr.json +++ b/jsr.json @@ -1,8 +1,10 @@ { "name": "@openai/openai", - "version": "4.47.1", + "version": "4.71.1", 
"exports": "./index.ts", "publish": { - "exclude": ["!."] + "exclude": [ + "!." + ] } } diff --git a/package.json b/package.json index 501d4f31e..dd3dfba7a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.71.0", + "version": "4.71.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 273878132..3474c77c3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.71.0'; // x-release-please-version +export const VERSION = '4.71.1'; // x-release-please-version From 4dfb0c6aa7c4530665bc7d6beebcd04aa1490e27 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 20:50:47 +0000 Subject: [PATCH 030/246] chore(ecosystem-tests): bump wrangler version (#1178) Co-authored-by: stainless-bot --- .../cloudflare-worker/package-lock.json | 301 +++++++++++------- .../cloudflare-worker/package.json | 2 +- 2 files changed, 189 insertions(+), 114 deletions(-) diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json index 0673bb27c..99d787f75 100644 --- a/ecosystem-tests/cloudflare-worker/package-lock.json +++ b/ecosystem-tests/cloudflare-worker/package-lock.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" } }, "node_modules/@ampproject/remapping": { @@ -662,18 +662,21 @@ "dev": true }, "node_modules/@cloudflare/kv-asset-handler": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.2.0.tgz", - "integrity": "sha512-MVbXLbTcAotOPUj0pAMhVtJ+3/kFkwJqc5qNOleOZTv6QkZZABDMS21dSrSlVswEHwrpWC03e4fWytjqKvuE2A==", + "version": "0.3.4", + "resolved": 
"https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.3.4.tgz", + "integrity": "sha512-YLPHc8yASwjNkmcDMQMY35yiWjoKAKnhUbPRszBRS0YgH+IXtsMp61j+yTcnCE3oO2DgP0U3iejLC8FTtKDC8Q==", "dev": true, "dependencies": { "mime": "^3.0.0" + }, + "engines": { + "node": ">=16.13" } }, "node_modules/@cloudflare/workerd-darwin-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20231030.0.tgz", - "integrity": "sha512-J4PQ9utPxLya9yHdMMx3AZeC5M/6FxcoYw6jo9jbDDFTy+a4Gslqf4Im9We3aeOEdPXa3tgQHVQOSelJSZLhIw==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20241022.0.tgz", + "integrity": "sha512-1NNYun37myMTgCUiPQEJ0cMal4mKZVTpkD0b2tx9hV70xji+frVJcSK8YVLeUm1P+Rw1d/ct8DMgQuCpsz3Fsw==", "cpu": [ "x64" ], @@ -687,9 +690,9 @@ } }, "node_modules/@cloudflare/workerd-darwin-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20231030.0.tgz", - "integrity": "sha512-WSJJjm11Del4hSneiNB7wTXGtBXI4QMCH9l5qf4iT5PAW8cESGcCmdHtWDWDtGAAGcvmLT04KNvmum92vRKKQQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20241022.0.tgz", + "integrity": "sha512-FOO/0P0U82EsTLTdweNVgw+4VOk5nghExLPLSppdOziq6IR5HVgP44Kmq5LdsUeHUhwUmfOh9hzaTpkNzUqKvw==", "cpu": [ "arm64" ], @@ -703,9 +706,9 @@ } }, "node_modules/@cloudflare/workerd-linux-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20231030.0.tgz", - "integrity": "sha512-2HUeRTvoCC17fxE0qdBeR7J9dO8j4A8ZbdcvY8pZxdk+zERU6+N03RTbk/dQMU488PwiDvcC3zZqS4gwLfVT8g==", + "version": "1.20241022.0", + "resolved": 
"https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20241022.0.tgz", + "integrity": "sha512-RsNc19BQJG9yd+ngnjuDeG9ywZG+7t1L4JeglgceyY5ViMNMKVO7Zpbsu69kXslU9h6xyQG+lrmclg3cBpnhYA==", "cpu": [ "x64" ], @@ -719,9 +722,9 @@ } }, "node_modules/@cloudflare/workerd-linux-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20231030.0.tgz", - "integrity": "sha512-4/GK5zHh+9JbUI6Z5xTCM0ZmpKKHk7vu9thmHjUxtz+o8Ne9DoD7DlDvXQWgMF6XGaTubDWyp3ttn+Qv8jDFuQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20241022.0.tgz", + "integrity": "sha512-x5mUXpKxfsosxcFmcq5DaqLs37PejHYVRsNz1cWI59ma7aC4y4Qn6Tf3i0r9MwQTF/MccP4SjVslMU6m4W7IaA==", "cpu": [ "arm64" ], @@ -735,9 +738,9 @@ } }, "node_modules/@cloudflare/workerd-windows-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20231030.0.tgz", - "integrity": "sha512-fb/Jgj8Yqy3PO1jLhk7mTrHMkR8jklpbQFud6rL/aMAn5d6MQbaSrYOCjzkKGp0Zng8D2LIzSl+Fc0C9Sggxjg==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20241022.0.tgz", + "integrity": "sha512-eBCClx4szCOgKqOlxxbdNszMqQf3MRG1B9BRIqEM/diDfdR9IrZ8l3FaEm+l9gXgPmS6m1NBn40aWuGBl8UTSw==", "cpu": [ "x64" ], @@ -750,12 +753,47 @@ "node": ">=16" } }, + "node_modules/@cloudflare/workers-shared": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-shared/-/workers-shared-0.7.0.tgz", + "integrity": "sha512-LLQRTqx7lKC7o2eCYMpyc5FXV8d0pUX6r3A+agzhqS9aoR5A6zCPefwQGcvbKx83ozX22ATZcemwxQXn12UofQ==", + "dev": true, + "dependencies": { + "mime": "^3.0.0", + "zod": "^3.22.3" + }, + "engines": { + "node": ">=16.7.0" + } + }, "node_modules/@cloudflare/workers-types": { - "version": 
"4.20230821.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20230821.0.tgz", - "integrity": "sha512-lVQSyr5E4CEkQw7WIdsrMTj+kHjsm28mJ0B5AhNFByKR+16KTFsU/RW/nGLKHHW2jxT5lvYI+HjNQMzC9QR8Ng==", + "version": "4.20241106.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20241106.0.tgz", + "integrity": "sha512-pI4ivacmp+vgNO/siHDsZ6BdITR0LC4Mh/1+yzVLcl9U75pt5DUDCOWOiqIRFXRq6H65DPnJbEPFo3x9UfgofQ==", "dev": true }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@esbuild-plugins/node-globals-polyfill": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz", @@ -1142,6 +1180,15 @@ "node": ">=12" } }, + "node_modules/@fastify/busboy": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", + "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", + "dev": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", 
"resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -1655,9 +1702,9 @@ "dev": true }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1667,10 +1714,13 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, "engines": { "node": ">=0.4.0" } @@ -1983,18 +2033,6 @@ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true }, - "node_modules/busboy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", - "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", - "dev": true, - "dependencies": { - "streamsearch": "^1.1.0" - }, - "engines": { - "node": ">=10.16.0" - } - }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2198,9 +2236,9 @@ "dev": true }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": 
"https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "dev": true, "engines": { "node": ">= 0.6" @@ -2249,6 +2287,16 @@ "node": ">= 12" } }, + "node_modules/date-fns": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-4.1.0.tgz", + "integrity": "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==", + "dev": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -2289,6 +2337,12 @@ "node": ">=0.10.0" } }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "dev": true + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -3038,6 +3092,12 @@ "node": ">=8" } }, + "node_modules/itty-time": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/itty-time/-/itty-time-1.0.6.tgz", + "integrity": "sha512-+P8IZaLLBtFv8hCkIjcymZOp4UJ+xW6bSlQsXGqrkmJh7vSiMFSlNne0mCYagEE0N7HDNR5jJBRxwN0oYv61Rw==", + "dev": true + }, "node_modules/jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", @@ -3894,23 +3954,23 @@ } }, "node_modules/miniflare": { - "version": "3.20231030.3", - "resolved": 
"https://registry.npmjs.org/miniflare/-/miniflare-3.20231030.3.tgz", - "integrity": "sha512-lquHSh0XiO8uoWDujOLHtDS9mkUTJTc5C5amiQ6A++5y0f+DWiMqbDBvvwjlYf4Dvqk6ChFya9dztk7fg2ZVxA==", + "version": "3.20241022.0", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20241022.0.tgz", + "integrity": "sha512-x9Fbq1Hmz1f0osIT9Qmj78iX4UpCP2EqlZnA/tzj/3+I49vc3Kq0fNqSSKplcdf6HlCHdL3fOBicmreQF4BUUQ==", "dev": true, "dependencies": { + "@cspotcode/source-map-support": "0.8.1", "acorn": "^8.8.0", "acorn-walk": "^8.2.0", "capnp-ts": "^0.7.0", "exit-hook": "^2.2.1", "glob-to-regexp": "^0.4.1", - "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.22.1", - "workerd": "1.20231030.0", - "ws": "^8.11.0", + "undici": "^5.28.4", + "workerd": "1.20241022.0", + "ws": "^8.17.1", "youch": "^3.2.2", - "zod": "^3.20.6" + "zod": "^3.22.3" }, "bin": { "miniflare": "bootstrap.js" @@ -3919,16 +3979,6 @@ "node": ">=16.13" } }, - "node_modules/miniflare/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -4066,6 +4116,12 @@ "node": ">=8" } }, + "node_modules/ohash": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-1.1.4.tgz", + "integrity": "sha512-FlDryZAahJmEF3VR3w1KogSEdWX3WhA5GPakFx4J81kEAiHyLMpdLLElS8n8dfNadMgAne/MywcvmogzscVt4g==", + "dev": true + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -4193,9 +4249,15 @@ "dev": true }, "node_modules/path-to-regexp": { - "version": "6.2.1", 
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", "dev": true }, "node_modules/pause-stream": { @@ -4613,15 +4675,6 @@ "duplexer": "~0.1.1" } }, - "node_modules/streamsearch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", - "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, "node_modules/string-length": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", @@ -4878,18 +4931,37 @@ "node": ">=12.20" } }, + "node_modules/ufo": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.4.tgz", + "integrity": "sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==", + "dev": true + }, "node_modules/undici": { - "version": "5.23.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.23.0.tgz", - "integrity": "sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==", + "version": "5.28.4", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.4.tgz", + "integrity": 
"sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==", "dev": true, "dependencies": { - "busboy": "^1.6.0" + "@fastify/busboy": "^2.0.0" }, "engines": { "node": ">=14.0" } }, + "node_modules/unenv": { + "name": "unenv-nightly", + "version": "2.0.0-20241024-111401-d4156ac", + "resolved": "https://registry.npmjs.org/unenv-nightly/-/unenv-nightly-2.0.0-20241024-111401-d4156ac.tgz", + "integrity": "sha512-xJO1hfY+Te+/XnfCYrCbFbRcgu6XEODND1s5wnVbaBCkuQX7JXF7fHEXPrukFE2j8EOH848P8QN19VO47XN8hw==", + "dev": true, + "dependencies": { + "defu": "^6.1.4", + "ohash": "^1.1.4", + "pathe": "^1.1.2", + "ufo": "^1.5.4" + } + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", @@ -4986,9 +5058,9 @@ } }, "node_modules/workerd": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20231030.0.tgz", - "integrity": "sha512-+FSW+d31f8RrjHanFf/R9A+Z0csf3OtsvzdPmAKuwuZm/5HrBv83cvG9fFeTxl7/nI6irUUXIRF9xcj/NomQzQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20241022.0.tgz", + "integrity": "sha512-jyGXsgO9DRcJyx6Ovv7gUyDPc3UYC2i/E0p9GFUg6GUzpldw4Y93y9kOmdfsOnKZ3+lY53veSiUniiBPE6Q2NQ==", "dev": true, "hasInstallScript": true, "bin": { @@ -4998,32 +5070,37 @@ "node": ">=16" }, "optionalDependencies": { - "@cloudflare/workerd-darwin-64": "1.20231030.0", - "@cloudflare/workerd-darwin-arm64": "1.20231030.0", - "@cloudflare/workerd-linux-64": "1.20231030.0", - "@cloudflare/workerd-linux-arm64": "1.20231030.0", - "@cloudflare/workerd-windows-64": "1.20231030.0" + "@cloudflare/workerd-darwin-64": "1.20241022.0", + "@cloudflare/workerd-darwin-arm64": "1.20241022.0", + "@cloudflare/workerd-linux-64": "1.20241022.0", + "@cloudflare/workerd-linux-arm64": "1.20241022.0", + "@cloudflare/workerd-windows-64": "1.20241022.0" } }, 
"node_modules/wrangler": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.19.0.tgz", - "integrity": "sha512-pY7xWqkQn6DJ+1vz9YHz2pCftEmK+JCTj9sqnucp0NZnlUiILDmBWegsjjCLZycgfiA62J213N7NvjLPr2LB8w==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.85.0.tgz", + "integrity": "sha512-r5YCWUaF4ApLnloNE6jHHgRYdFzYHoajTlC1tns42UzQ2Ls63VAqD3b0cxOqzDUfmlSb3skpmu0B0Ssi3QWPAg==", "dev": true, "dependencies": { - "@cloudflare/kv-asset-handler": "^0.2.0", + "@cloudflare/kv-asset-handler": "0.3.4", + "@cloudflare/workers-shared": "0.7.0", "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@esbuild-plugins/node-modules-polyfill": "^0.2.2", "blake3-wasm": "^2.1.5", "chokidar": "^3.5.3", + "date-fns": "^4.1.0", "esbuild": "0.17.19", - "miniflare": "3.20231030.3", + "itty-time": "^1.0.6", + "miniflare": "3.20241022.0", "nanoid": "^3.3.3", - "path-to-regexp": "^6.2.0", + "path-to-regexp": "^6.3.0", + "resolve": "^1.22.8", "resolve.exports": "^2.0.2", "selfsigned": "^2.0.1", - "source-map": "0.6.1", - "source-map-support": "0.5.21", + "source-map": "^0.6.1", + "unenv": "npm:unenv-nightly@2.0.0-20241024-111401-d4156ac", + "workerd": "1.20241022.0", "xxhash-wasm": "^1.0.1" }, "bin": { @@ -5035,16 +5112,14 @@ }, "optionalDependencies": { "fsevents": "~2.3.2" - } - }, - "node_modules/wrangler/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20241022.0" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } } }, "node_modules/wrap-ansi": { @@ -5084,9 +5159,9 @@ } }, "node_modules/ws": { 
- "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "dev": true, "engines": { "node": ">=10.0.0" @@ -5165,20 +5240,20 @@ } }, "node_modules/youch": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/youch/-/youch-3.2.3.tgz", - "integrity": "sha512-ZBcWz/uzZaQVdCvfV4uk616Bbpf2ee+F/AvuKDR5EwX/Y4v06xWdtMluqTD7+KlZdM93lLm9gMZYo0sKBS0pgw==", + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/youch/-/youch-3.3.4.tgz", + "integrity": "sha512-UeVBXie8cA35DS6+nBkls68xaBBXCye0CNznrhszZjTbRVnJKQuNsyLKBTTL4ln1o1rh2PKtv35twV7irj5SEg==", "dev": true, "dependencies": { - "cookie": "^0.5.0", + "cookie": "^0.7.1", "mustache": "^4.2.0", "stacktracey": "^2.1.8" } }, "node_modules/zod": { - "version": "3.22.2", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.2.tgz", - "integrity": "sha512-wvWkphh5WQsJbVk1tbx1l1Ly4yg+XecD+Mq280uBGt9wa5BKSWf4Mhp6GmrkPixhMxmabYY7RbzlwVP32pbGCg==", + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", + "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", "dev": true, "funding": { "url": "https://github.com/sponsors/colinhacks" diff --git a/ecosystem-tests/cloudflare-worker/package.json b/ecosystem-tests/cloudflare-worker/package.json index 463de4045..3034e97f7 100644 --- a/ecosystem-tests/cloudflare-worker/package.json +++ b/ecosystem-tests/cloudflare-worker/package.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" }, 
"dependencies": { "node-fetch": "^3.3.1" From 34306573a15a03a1e84177aa2f74d8e63adc0bf0 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Mon, 11 Nov 2024 17:43:09 -0500 Subject: [PATCH 031/246] feat: add back deno runtime testing without type checks --- ecosystem-tests/cli.ts | 18 ++++++++---------- ecosystem-tests/deno/deno.jsonc | 7 +++---- scripts/build-deno | 18 +++++++++++++----- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index b0ff712f1..4803b47c2 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -95,16 +95,14 @@ const projectRunners = { await run('bun', ['test']); } }, - // Temporarily comment this out until we can test with JSR transformations end-to-end. - // deno: async () => { - // // we don't need to explicitly install the package here - // // because our deno setup relies on `rootDir/deno` to exist - // // which is an artifact produced from our build process - // await run('deno', ['task', 'install']); - // await run('deno', ['task', 'check']); - - // if (state.live) await run('deno', ['task', 'test']); - // }, + deno: async () => { + // we don't need to explicitly install the package here + // because our deno setup relies on `rootDir/dist-deno` to exist + // which is an artifact produced from our build process + await run('deno', ['task', 'install', '--unstable-sloppy-imports']); + + if (state.live) await run('deno', ['task', 'test']); + }, }; let projectNames = Object.keys(projectRunners) as Array; diff --git a/ecosystem-tests/deno/deno.jsonc b/ecosystem-tests/deno/deno.jsonc index 7de05f2ba..46d7ee486 100644 --- a/ecosystem-tests/deno/deno.jsonc +++ b/ecosystem-tests/deno/deno.jsonc @@ -1,11 +1,10 @@ { "tasks": { "install": "deno install --node-modules-dir main_test.ts -f", - "check": "deno lint && deno check main_test.ts", - "test": "deno test --allow-env --allow-net --allow-read --node-modules-dir" + "test": "deno test --allow-env --allow-net --allow-read 
--node-modules-dir --unstable-sloppy-imports --no-check" }, "imports": { - "openai": "../../deno/mod.ts", - "openai/": "../../deno/" + "openai": "../../dist-deno/index.ts", + "openai/": "../../dist-deno/" } } diff --git a/scripts/build-deno b/scripts/build-deno index 4a2000a66..dfce83548 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -7,13 +7,21 @@ cd "$(dirname "$0")/.." rm -rf dist-deno; mkdir dist-deno cp -rp src/* jsr.json dist-deno +rm -rf dist-deno/shims + +rm dist-deno/_shims/node*.{js,mjs,ts} +rm dist-deno/_shims/manual*.{js,mjs,ts} +rm dist-deno/_shims/index.{d.ts,js,mjs} +for file in dist-deno/_shims/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" +done + rm dist-deno/_shims/auto/*-node.ts -for dir in dist-deno/_shims dist-deno/_shims/auto; do - rm "${dir}"/*.{d.ts,js,mjs} - for file in "${dir}"/*-deno.ts; do - mv -- "$file" "${file%-deno.ts}.ts" - done +rm dist-deno/_shims/auto/*.{d.ts,js,mjs} +for file in dist-deno/_shims/auto/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" done + for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done From a92cc1dbc4ab3284c6654d69d5c39399a867f601 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:00:36 +0000 Subject: [PATCH 032/246] release: 4.72.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fbbb03de..e53c9dd88 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.71.1" + ".": "4.72.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e74a8ee3..951ef0784 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.72.0 (2024-11-12) + +Full Changelog: 
[v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0) + +### Features + +* add back deno runtime testing without type checks ([1626cf5](https://github.com/openai/openai-node/commit/1626cf57e94706e1fc8b2f9ff4f173fe486d5150)) + + +### Chores + +* **ecosystem-tests:** bump wrangler version ([#1178](https://github.com/openai/openai-node/issues/1178)) ([4dfb0c6](https://github.com/openai/openai-node/commit/4dfb0c6aa7c4530665bc7d6beebcd04aa1490e27)) + ## 4.71.1 (2024-11-06) Full Changelog: [v4.71.0...v4.71.1](https://github.com/openai/openai-node/compare/v4.71.0...v4.71.1) diff --git a/jsr.json b/jsr.json index 48a838612..ad1751852 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.71.1", + "version": "4.72.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index dd3dfba7a..85fbed4f1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.71.1", + "version": "4.72.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3474c77c3..cad6e2320 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.71.1'; // x-release-please-version +export const VERSION = '4.72.0'; // x-release-please-version From f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 12 Nov 2024 16:47:12 +0000 Subject: [PATCH 033/246] chore(internal): use reexports not destructuring (#1181) --- src/index.ts | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/src/index.ts b/src/index.ts index c3299e00d..8e7e7804e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -306,25 +306,6 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export { - OpenAIError, - APIError, - 
APIConnectionError, - APIConnectionTimeoutError, - APIUserAbortError, - NotFoundError, - ConflictError, - RateLimitError, - BadRequestError, - AuthenticationError, - InternalServerError, - PermissionDeniedError, - UnprocessableEntityError, -} from './error'; - -export import toFile = Uploads.toFile; -export import fileFromPath = Uploads.fileFromPath; - OpenAI.Completions = Completions; OpenAI.Chat = Chat; OpenAI.Embeddings = Embeddings; @@ -340,7 +321,6 @@ OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; - export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -664,4 +644,21 @@ const API_KEY_SENTINEL = ''; // ---------------------- End Azure ---------------------- +export { toFile, fileFromPath } from 'openai/uploads'; +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from 'openai/error'; + export default OpenAI; From 4ec402790cf3cfbccbf3ef9b61d577b0118977e8 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 12 Nov 2024 16:48:27 +0000 Subject: [PATCH 034/246] docs: bump models in example snippets to gpt-4o (#1184) --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index caa3f9d4a..8d30be928 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ const client = new OpenAI({ async function main() { const chatCompletion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4o', }); } @@ -57,7 +57,7 @@ const client = new OpenAI(); async function main() { const stream = await client.chat.completions.create({ - model: 'gpt-4', + model: 'gpt-4o', messages: [{ role: 'user', content: 'Say 
this is a test' }], stream: true, }); @@ -87,7 +87,7 @@ const client = new OpenAI({ async function main() { const params: OpenAI.Chat.ChatCompletionCreateParams = { messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4o', }; const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); } @@ -333,7 +333,7 @@ a subclass of `APIError` will be thrown: ```ts async function main() { const job = await client.fineTuning.jobs - .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' }) + .create({ model: 'gpt-4o', training_file: 'file-abc123' }) .catch(async (err) => { if (err instanceof OpenAI.APIError) { console.log(err.status); // 400 @@ -415,7 +415,7 @@ const client = new OpenAI({ }); // Or, configure per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in JavaScript?' }], model: 'gpt-4o' }, { maxRetries: 5, }); ``` @@ -432,7 +432,7 @@ const client = new OpenAI({ }); // Override per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' 
}], model: 'gpt-4o' }, { timeout: 5 * 1000, }); ``` @@ -485,13 +485,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi const client = new OpenAI(); const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) + .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) + .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); console.log(chatCompletion); From 524b9e82ae13a3b5093dcfbfd1169a798cf99ab4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 14 Nov 2024 19:59:44 +0000 Subject: [PATCH 035/246] fix(docs): add missing await to pagination example (#1190) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8d30be928..2f05654b4 100644 --- a/README.md +++ b/README.md @@ -467,7 +467,7 @@ for (const fineTuningJob of page.data) { // Convenience methods are provided for manually paginating: while (page.hasNextPage()) { - page = page.getNextPage(); + page = await page.getNextPage(); // ... 
} ``` From 8ee6c0335673f2ecf84ea11bdfc990adab607e20 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 15 Nov 2024 08:31:55 +0000 Subject: [PATCH 036/246] chore(client): drop unused devDependency (#1191) --- package.json | 1 - src/index.ts | 4 ++-- yarn.lock | 40 ---------------------------------------- 3 files changed, 2 insertions(+), 43 deletions(-) diff --git a/package.json b/package.json index 85fbed4f1..8a61d468f 100644 --- a/package.json +++ b/package.json @@ -47,7 +47,6 @@ "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", "ts-jest": "^29.1.0", - "ts-morph": "^19.0.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", diff --git a/src/index.ts b/src/index.ts index 8e7e7804e..58d7410e4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -644,7 +644,7 @@ const API_KEY_SENTINEL = ''; // ---------------------- End Azure ---------------------- -export { toFile, fileFromPath } from 'openai/uploads'; +export { toFile, fileFromPath } from './uploads'; export { OpenAIError, APIError, @@ -659,6 +659,6 @@ export { InternalServerError, PermissionDeniedError, UnprocessableEntityError, -} from 'openai/error'; +} from './error'; export default OpenAI; diff --git a/yarn.lock b/yarn.lock index 91b22b941..e139e1fbe 100644 --- a/yarn.lock +++ b/yarn.lock @@ -759,16 +759,6 @@ dependencies: "@swc/counter" "^0.1.3" -"@ts-morph/common@~0.20.0": - version "0.20.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.20.0.tgz#3f161996b085ba4519731e4d24c35f6cba5b80af" - integrity sha512-7uKjByfbPpwuzkstL3L5MQyuXPSKdoNG93Fmi2JoDcTf3pEP731JdRFAduRVkOs8oqxPsXKA+ScrWkdQ8t/I+Q== - dependencies: - fast-glob "^3.2.12" - minimatch "^7.4.3" - mkdirp "^2.1.6" - path-browserify "^1.0.1" - "@tsconfig/node10@^1.0.7": version "1.0.8" resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.8.tgz#c1e4e80d6f964fbecb3359c43bd48b40f7cadad9" @@ -1315,11 +1305,6 @@ co@^4.6.0: resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ== -code-block-writer@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" - integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== - collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -2687,23 +2672,11 @@ minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: dependencies: brace-expansion "^1.1.7" -minimatch@^7.4.3: - version "7.4.6" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-7.4.6.tgz#845d6f254d8f4a5e4fd6baf44d5f10c8448365fb" - integrity sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw== - dependencies: - brace-expansion "^2.0.1" - minimist@^1.2.6: version "1.2.6" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -mkdirp@^2.1.6: - version "2.1.6" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19" - integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A== - ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" @@ -2867,11 +2840,6 @@ parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -path-browserify@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" - integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== - path-exists@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" @@ -3300,14 +3268,6 @@ ts-jest@^29.1.0: semver "^7.5.3" yargs-parser "^21.0.1" -ts-morph@^19.0.0: - version "19.0.0" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-19.0.0.tgz#43e95fb0156c3fe3c77c814ac26b7d0be2f93169" - integrity sha512-D6qcpiJdn46tUqV45vr5UGM2dnIEuTGNxVhg0sk5NX11orcouwj6i1bMqZIz2mZTZB1Hcgy7C3oEVhAT+f6mbQ== - dependencies: - "@ts-morph/common" "~0.20.0" - code-block-writer "^12.0.0" - ts-node@^10.5.0: version "10.7.0" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.7.0.tgz#35d503d0fab3e2baa672a0e94f4b40653c2463f5" From 12f93346857196b93f94865cc3744d769e5e519c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 10:42:10 +0000 Subject: [PATCH 037/246] chore(internal): spec update (#1195) --- .stats.yml | 2 +- .../audio/transcriptions.test.ts | 2 +- tests/api-resources/beta/assistants.test.ts | 8 +- .../beta/threads/messages.test.ts | 15 +- .../beta/threads/runs/runs.test.ts | 99 +-------- .../beta/threads/threads.test.ts | 198 +----------------- tests/api-resources/chat/completions.test.ts | 10 +- .../fine-tuning/jobs/jobs.test.ts | 25 +-- tests/api-resources/uploads/uploads.test.ts | 9 +- 9 files changed, 22 insertions(+), 346 deletions(-) diff --git a/.stats.yml b/.stats.yml index f368bc881..fdef8d274 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml 
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index ef2797911..86ef5e576 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -31,7 +31,7 @@ describe('resource transcriptions', () => { prompt: 'prompt', response_format: 'json', temperature: 0, - timestamp_granularities: ['word', 'segment'], + timestamp_granularities: ['word'], }); }); }); diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index fdc325254..a64465c77 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -30,15 +30,13 @@ describe('resource assistants', () => { response_format: 'auto', temperature: 1, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, }); }); diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index bfbcab1cb..c1f5f7b6e 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -27,20 +27,7 @@ describe('resource messages', () => { const response = await client.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - 
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 352d775c0..4fd8261ac 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -29,94 +29,7 @@ describe('resource runs', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 
'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], @@ -130,7 +43,7 @@ describe('resource runs', () => { stream: false, temperature: 1, tool_choice: 'none', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); @@ -214,7 +127,7 @@ describe('resource runs', () => { test('submitToolOutputs: only required params', async () => { const responsePromise = client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [{}, {}, {}], + tool_outputs: [{}], }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -227,11 +140,7 @@ describe('resource runs', () => { test('submitToolOutputs: required and optional params', async () => { const response = await client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [ - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - ], + tool_outputs: [{ output: 'output', tool_call_id: 'tool_call_id' }], stream: false, }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index dc0a94a7d..aba266316 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -36,109 +36,16 @@ describe('resource threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - 
tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], metadata: {}, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { - chunking_strategy: { type: 'auto' }, - file_ids: ['string', 'string', 'string'], - metadata: {}, - }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], }, }, }, @@ -222,114 +129,25 @@ describe('resource 
threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], metadata: {}, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, 
file_ids: ['string'], metadata: {} }], }, }, }, tool_choice: 'none', tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'] }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 180a1d77f..5dcbf9ad6 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -36,7 +36,7 @@ describe('resource completions', () => { max_completion_tokens: 0, max_tokens: 0, metadata: { foo: 'string' }, - modalities: ['text', 'audio'], + modalities: ['text'], n: 1, parallel_tool_calls: true, prediction: { content: 'string', type: 'content' }, @@ -55,14 +55,6 @@ describe('resource completions', () => { function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, type: 'function', }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, ], top_logprobs: 0, top_p: 1, diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 646c2f5cf..0ab09768a 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -31,30 +31,7 @@ describe('resource jobs', () => { integrations: [ { type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, - }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - 
entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, - }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, + wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, ], seed: 42, diff --git a/tests/api-resources/uploads/uploads.test.ts b/tests/api-resources/uploads/uploads.test.ts index e4e3c6d30..c9ea4ddd7 100644 --- a/tests/api-resources/uploads/uploads.test.ts +++ b/tests/api-resources/uploads/uploads.test.ts @@ -53,9 +53,7 @@ describe('resource uploads', () => { }); test('complete: only required params', async () => { - const responsePromise = client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - }); + const responsePromise = client.uploads.complete('upload_abc123', { part_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -66,9 +64,6 @@ describe('resource uploads', () => { }); test('complete: required and optional params', async () => { - const response = await client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - md5: 'md5', - }); + const response = await client.uploads.complete('upload_abc123', { part_ids: ['string'], md5: 'md5' }); }); }); From 6961c37f2e581bcc12ec2bbe77df2b9b260fe297 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Mon, 18 Nov 2024 16:11:29 -0500 Subject: [PATCH 038/246] feat: bump model in all example snippets to gpt-4o --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2f05654b4..c363eaa98 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,7 @@ const openai = new OpenAI(); async function main() { const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4', + model: 'gpt-4o', messages: [{ 
role: 'user', content: 'Say this is a test' }], stream: true, }); @@ -226,7 +226,7 @@ const client = new OpenAI(); async function main() { const runner = client.beta.chat.completions .runTools({ - model: 'gpt-3.5-turbo', + model: 'gpt-4o', messages: [{ role: 'user', content: 'How is the weather this week?' }], tools: [ { @@ -368,7 +368,7 @@ Error codes are as followed: All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. ```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4' }); +const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); console.log(completion._request_id) // req_123 ``` @@ -392,7 +392,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope); const openai = new AzureOpenAI({ azureADTokenProvider }); const result = await openai.chat.completions.create({ - model: 'gpt-4-1106-preview', + model: 'gpt-4o', messages: [{ role: 'user', content: 'Say hello!' 
}], }); From ebdb4f72cc01afbee649aca009fdaf413e61c507 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 17:57:52 +0000 Subject: [PATCH 039/246] docs: improve jsr documentation (#1197) --- README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c363eaa98..5d6ba1a8b 100644 --- a/README.md +++ b/README.md @@ -14,16 +14,21 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo npm install openai ``` -You can also import from jsr: +### Installation from JSR - +```sh +deno add jsr:@openai/openai +npx jsr add @openai/openai +``` + +These commands will make the module importable from the `@openai/openai` scope: + +You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; ``` - - ## Usage The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. @@ -622,7 +627,7 @@ TypeScript >= 4.5 is supported. The following runtimes are supported: - Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions. -- Deno v1.28.0 or higher, using `import OpenAI from "npm:openai"`. +- Deno v1.28.0 or higher. - Bun 1.0 or later. - Cloudflare Workers. - Vercel Edge Runtime. 
From e34981c00f2f0360baffe870bcc38786030671bf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 23:41:02 +0000 Subject: [PATCH 040/246] docs: change readme title (#1198) --- README.md | 2 +- scripts/build | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5d6ba1a8b..d89e121f1 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# OpenAI Node API Library +# OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) diff --git a/scripts/build b/scripts/build index 0246c90e3..4e86f99e2 100755 --- a/scripts/build +++ b/scripts/build @@ -32,7 +32,7 @@ npm exec tsc-multi # copy over handwritten .js/.mjs/.d.ts files cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto -# we need to add exports = module.exports = OpenAI Node to index.js; +# we need to add exports = module.exports = OpenAI to index.js; # No way to get that from index.ts because it would cause compile errors # when building .mjs node scripts/utils/fix-index-exports.cjs From 3968ef1c4fa860ff246e0e803808752b261c18ce Mon Sep 17 00:00:00 2001 From: Eric He Date: Wed, 20 Nov 2024 02:35:46 -0800 Subject: [PATCH 041/246] docs(readme): fix incorrect fileBatches.uploadAndPoll params (#1200) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d89e121f1..ec17427a6 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,7 @@ const fileList = [ ... 
]; -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList); +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); ``` ### Streaming Helpers From 0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:17:02 +0000 Subject: [PATCH 042/246] feat(api): add gpt-4o-2024-11-20 model (#1201) --- .stats.yml | 2 +- src/resources/batches.ts | 2 +- src/resources/chat/chat.ts | 1 + src/resources/chat/completions.ts | 5 +++-- src/resources/files.ts | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index fdef8d274..4827e5388 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index e68e7569c..ec5ca6331 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -232,7 +232,7 @@ export interface BatchCreateParams { * Your input file must be formatted as a * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - * requests, and can be up to 100 MB in size. + * requests, and can be up to 200 MB in size. 
*/ input_file_id: string; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 351430f8c..09cd3d123 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -49,6 +49,7 @@ export type ChatModel = | 'o1-mini' | 'o1-mini-2024-09-12' | 'gpt-4o' + | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'gpt-4o-realtime-preview' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 9d344744a..8e9a4385e 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -250,8 +250,9 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `alloy`, `ash`, - * `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. + * The voice the model uses to respond. Supported voices are `ash`, `ballad`, + * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, + * `echo`, and `shimmer`; these voices are less expressive). */ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } diff --git a/src/resources/files.ts b/src/resources/files.ts index 48d8f8747..42a7bdfba 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -25,7 +25,7 @@ export class Files extends APIResource { * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) * models. * - * The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + * The Batch API only supports `.jsonl` files up to 200 MB in size. The input also * has a specific required * [format](https://platform.openai.com/docs/api-reference/batch/request-input). 
* From 1e9391bc17c29287f2b7bb8acf77390f3e727ad2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:17:37 +0000 Subject: [PATCH 043/246] release: 4.73.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 33 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e53c9dd88..d3e848620 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.72.0" + ".": "4.73.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 951ef0784..51741f552 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +## 4.73.0 (2024-11-20) + +Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0) + +### Features + +* **api:** add gpt-4o-2024-11-20 model ([#1201](https://github.com/openai/openai-node/issues/1201)) ([0feeafd](https://github.com/openai/openai-node/commit/0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3)) +* bump model in all example snippets to gpt-4o ([6961c37](https://github.com/openai/openai-node/commit/6961c37f2e581bcc12ec2bbe77df2b9b260fe297)) + + +### Bug Fixes + +* **docs:** add missing await to pagination example ([#1190](https://github.com/openai/openai-node/issues/1190)) ([524b9e8](https://github.com/openai/openai-node/commit/524b9e82ae13a3b5093dcfbfd1169a798cf99ab4)) + + +### Chores + +* **client:** drop unused devDependency ([#1191](https://github.com/openai/openai-node/issues/1191)) ([8ee6c03](https://github.com/openai/openai-node/commit/8ee6c0335673f2ecf84ea11bdfc990adab607e20)) +* **internal:** spec update ([#1195](https://github.com/openai/openai-node/issues/1195)) ([12f9334](https://github.com/openai/openai-node/commit/12f93346857196b93f94865cc3744d769e5e519c)) +* **internal:** use reexports not destructuring 
([#1181](https://github.com/openai/openai-node/issues/1181)) ([f555dd6](https://github.com/openai/openai-node/commit/f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369)) + + +### Documentation + +* bump models in example snippets to gpt-4o ([#1184](https://github.com/openai/openai-node/issues/1184)) ([4ec4027](https://github.com/openai/openai-node/commit/4ec402790cf3cfbccbf3ef9b61d577b0118977e8)) +* change readme title ([#1198](https://github.com/openai/openai-node/issues/1198)) ([e34981c](https://github.com/openai/openai-node/commit/e34981c00f2f0360baffe870bcc38786030671bf)) +* improve jsr documentation ([#1197](https://github.com/openai/openai-node/issues/1197)) ([ebdb4f7](https://github.com/openai/openai-node/commit/ebdb4f72cc01afbee649aca009fdaf413e61c507)) +* **readme:** fix incorrect fileBatches.uploadAndPoll params ([#1200](https://github.com/openai/openai-node/issues/1200)) ([3968ef1](https://github.com/openai/openai-node/commit/3968ef1c4fa860ff246e0e803808752b261c18ce)) + ## 4.72.0 (2024-11-12) Full Changelog: [v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0) diff --git a/jsr.json b/jsr.json index ad1751852..f09f5bbab 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.72.0", + "version": "4.73.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 8a61d468f..13e8ee3bc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.72.0", + "version": "4.73.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index cad6e2320..4e3a33b17 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.72.0'; // x-release-please-version +export const VERSION = '4.73.0'; // x-release-please-version From aa5443624b4dc206ede08a743ec276b3a576861f Mon Sep 17 00:00:00 2001 From: 
Robert Craigie Date: Fri, 22 Nov 2024 19:56:39 +0000 Subject: [PATCH 044/246] docs(readme): mention `.withResponse()` for streaming request ID (#1202) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index ec17427a6..ac9c84a42 100644 --- a/README.md +++ b/README.md @@ -377,6 +377,18 @@ const completion = await client.chat.completions.create({ messages: [{ role: 'us console.log(completion._request_id) // req_123 ``` +You can also access the Request ID using the `.withResponse()` method: + +```ts +const { data: stream, request_id } = await openai.chat.completions + .create({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + stream: true, + }) + .withResponse(); +``` + ## Microsoft Azure OpenAI To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` From 3f8634ed111782e3090a25d1d8640e050fb2c45b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 05:07:22 +0000 Subject: [PATCH 045/246] release: 4.73.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d3e848620..92fcace17 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.73.0" + ".": "4.73.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 51741f552..c32a0ce32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.73.1 (2024-11-25) + +Full Changelog: [v4.73.0...v4.73.1](https://github.com/openai/openai-node/compare/v4.73.0...v4.73.1) + +### Documentation + +* **readme:** mention `.withResponse()` for streaming request ID ([#1202](https://github.com/openai/openai-node/issues/1202)) 
([b6800d4](https://github.com/openai/openai-node/commit/b6800d4dea2729fe3b0864171ce8fb3b2cc1b21c)) + ## 4.73.0 (2024-11-20) Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0) diff --git a/jsr.json b/jsr.json index f09f5bbab..0bd5eab3f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.73.0", + "version": "4.73.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 13e8ee3bc..685d59f56 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.73.0", + "version": "4.73.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4e3a33b17..28fbb6572 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.73.0'; // x-release-please-version +export const VERSION = '4.73.1'; // x-release-please-version From 2628a0bc6a380478889d94cf6f08cb179eab9e9c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 16:15:42 +0000 Subject: [PATCH 046/246] feat(internal): make git install file structure match npm (#1204) --- package.json | 2 +- scripts/utils/check-is-in-git-install.sh | 2 +- scripts/utils/git-swap.sh | 13 +++++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100755 scripts/utils/git-swap.sh diff --git a/package.json b/package.json index 685d59f56..87004d273 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,7 @@ "build": "./scripts/build", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", "format": "prettier --write --cache --cache-strategy metadata . 
!dist", - "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", + "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build && ./scripts/utils/git-swap.sh; fi", "tsn": "ts-node -r tsconfig-paths/register", "lint": "./scripts/lint", "fix": "./scripts/format" diff --git a/scripts/utils/check-is-in-git-install.sh b/scripts/utils/check-is-in-git-install.sh index 36bcedc20..1354eb432 100755 --- a/scripts/utils/check-is-in-git-install.sh +++ b/scripts/utils/check-is-in-git-install.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Check if you happen to call prepare for a repository that's already in node_modules. [ "$(basename "$(dirname "$PWD")")" = 'node_modules' ] || # The name of the containing directory that 'npm` uses, which looks like diff --git a/scripts/utils/git-swap.sh b/scripts/utils/git-swap.sh new file mode 100755 index 000000000..79d1888eb --- /dev/null +++ b/scripts/utils/git-swap.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -exuo pipefail +# the package is published to NPM from ./dist +# we want the final file structure for git installs to match the npm installs, so we + +# delete everything except ./dist and ./node_modules +find . -maxdepth 1 -mindepth 1 ! -name 'dist' ! -name 'node_modules' -exec rm -rf '{}' + + +# move everything from ./dist to . +mv dist/* . 
+ +# delete the now-empty ./dist +rmdir dist From d40c61cfc8c4f5f6aea4ffdd3ea3909e02b92bd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 05:07:36 +0000 Subject: [PATCH 047/246] release: 4.74.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 92fcace17..8edd9c22e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.73.1" + ".": "4.74.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c32a0ce32..595091ff3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.74.0 (2024-12-02) + +Full Changelog: [v4.73.1...v4.74.0](https://github.com/openai/openai-node/compare/v4.73.1...v4.74.0) + +### Features + +* **internal:** make git install file structure match npm ([#1204](https://github.com/openai/openai-node/issues/1204)) ([e7c4c6d](https://github.com/openai/openai-node/commit/e7c4c6d23adbe52300053a8d35db6e341c438703)) + ## 4.73.1 (2024-11-25) Full Changelog: [v4.73.0...v4.73.1](https://github.com/openai/openai-node/compare/v4.73.0...v4.73.1) diff --git a/jsr.json b/jsr.json index 0bd5eab3f..eb073e7e6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.73.1", + "version": "4.74.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 87004d273..7e188774a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.73.1", + "version": "4.74.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 28fbb6572..b8dd781be 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 
@@ -export const VERSION = '4.73.1'; // x-release-please-version +export const VERSION = '4.74.0'; // x-release-please-version From d0e210dd43b8cfbc804111b9923a26dd30bcc87f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 02:02:23 +0000 Subject: [PATCH 048/246] feat: improve docs for jsr README.md (#1208) --- scripts/build-deno | 2 + scripts/utils/convert-jsr-readme.cjs | 140 +++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 scripts/utils/convert-jsr-readme.cjs diff --git a/scripts/build-deno b/scripts/build-deno index dfce83548..bce31078e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -25,3 +25,5 @@ done for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done + +node scripts/utils/convert-jsr-readme.cjs ./dist-deno/README.md diff --git a/scripts/utils/convert-jsr-readme.cjs b/scripts/utils/convert-jsr-readme.cjs new file mode 100644 index 000000000..f9d089c73 --- /dev/null +++ b/scripts/utils/convert-jsr-readme.cjs @@ -0,0 +1,140 @@ +const fs = require('fs'); +const { parse } = require('@typescript-eslint/parser'); +const { TSError } = require('@typescript-eslint/typescript-estree'); + +/** + * Quick and dirty AST traversal + */ +function traverse(node, visitor) { + if (!node || typeof node.type !== 'string') return; + visitor.node?.(node); + visitor[node.type]?.(node); + for (const key in node) { + const value = node[key]; + if (Array.isArray(value)) { + for (const elem of value) traverse(elem, visitor); + } else if (value instanceof Object) { + traverse(value, visitor); + } + } +} + +/** + * Helper method for replacing arbitrary ranges of text in input code. 
+ */ +function replaceRanges(code, replacer) { + const replacements = []; + replacer({ replace: (range, replacement) => replacements.push({ range, replacement }) }); + + if (!replacements.length) return code; + replacements.sort((a, b) => a.range[0] - b.range[0]); + const overlapIndex = replacements.findIndex( + (r, index) => index > 0 && replacements[index - 1].range[1] > r.range[0], + ); + if (overlapIndex >= 0) { + throw new Error( + `replacements overlap: ${JSON.stringify(replacements[overlapIndex - 1])} and ${JSON.stringify( + replacements[overlapIndex], + )}`, + ); + } + + const parts = []; + let end = 0; + for (const { + range: [from, to], + replacement, + } of replacements) { + if (from > end) parts.push(code.substring(end, from)); + parts.push(replacement); + end = to; + } + if (end < code.length) parts.push(code.substring(end)); + return parts.join(''); +} + +function replaceProcessEnv(content) { + // Replace process.env['KEY'] and process.env.KEY with Deno.env.get('KEY') + return content.replace(/process\.env(?:\.|\[['"])(.+?)(?:['"]\])/g, "Deno.env.get('$1')"); +} + +function replaceProcessStdout(content) { + return content.replace(/process\.stdout.write\(([^)]+)\)/g, 'Deno.stdout.writeSync($1)'); +} + +function replaceInstallationDirections(content) { + // Remove npm installation section + return content.replace(/```sh\nnpm install.*?\n```.*### Installation from JSR\n\n/s, ''); +} + +/** + * Maps over module paths in imports and exports + */ +function replaceImports(code, config) { + try { + const ast = parse(code, { sourceType: 'module', range: true }); + return replaceRanges(code, ({ replace }) => + traverse(ast, { + node(node) { + switch (node.type) { + case 'ImportDeclaration': + case 'ExportNamedDeclaration': + case 'ExportAllDeclaration': + case 'ImportExpression': + if (node.source) { + const { range, value } = node.source; + if (value.startsWith(config.npm)) { + replace(range, JSON.stringify(value.replace(config.npm, config.jsr))); + } + } + } 
+ }, + }), + ); + } catch (e) { + if (e instanceof TSError) { + // This can error if the code block is not valid TS, in this case give up trying to transform the imports. + console.warn(`Original codeblock could not be parsed, replace import skipped: ${e}\n\n${code}`); + return code; + } + throw e; + } +} + +function processReadme(config, file) { + try { + let readmeContent = fs.readFileSync(file, 'utf8'); + + // First replace installation directions + readmeContent = replaceInstallationDirections(readmeContent); + + // Replace content in all code blocks with a single regex + readmeContent = readmeContent.replaceAll( + /```(?:typescript|ts|javascript|js)\n([\s\S]*?)```/g, + (match, codeBlock) => { + try { + let transformedCode = codeBlock.trim(); + transformedCode = replaceImports(transformedCode, config); + transformedCode = replaceProcessEnv(transformedCode); + transformedCode = replaceProcessStdout(transformedCode); + return '```typescript\n' + transformedCode + '\n```'; + } catch (error) { + console.warn(`Failed to transform code block: ${error}\n\n${codeBlock}`); + return match; // Return original code block if transformation fails + } + }, + ); + + fs.writeFileSync(file, readmeContent); + } catch (error) { + console.error('Error processing README:', error); + throw error; + } +} + +const config = { + npm: 'openai', + jsr: '@openai/openai', +}; + +processReadme(config, process.argv[2]); From ddb27b660950735f13934759c3db049bcf4dafd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 05:07:20 +0000 Subject: [PATCH 049/246] release: 4.75.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8edd9c22e..6258f1481 100644 --- a/.release-please-manifest.json +++ 
b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.74.0" + ".": "4.75.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 595091ff3..2d91a77c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.75.0 (2024-12-03) + +Full Changelog: [v4.74.0...v4.75.0](https://github.com/openai/openai-node/compare/v4.74.0...v4.75.0) + +### Features + +* improve docs for jsr README.md ([#1208](https://github.com/openai/openai-node/issues/1208)) ([338527e](https://github.com/openai/openai-node/commit/338527e40361e2de899a63f280d4ec2db5e87f3c)) + ## 4.74.0 (2024-12-02) Full Changelog: [v4.73.1...v4.74.0](https://github.com/openai/openai-node/compare/v4.73.1...v4.74.0) diff --git a/jsr.json b/jsr.json index eb073e7e6..a394539d1 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.74.0", + "version": "4.75.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 7e188774a..5738871a3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.74.0", + "version": "4.75.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index b8dd781be..82fc52958 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.74.0'; // x-release-please-version +export const VERSION = '4.75.0'; // x-release-please-version From 0f74bf4576ed26884f9ef9148bd854e60250c1a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:20:25 +0000 Subject: [PATCH 050/246] chore: bump openapi url (#1210) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4827e5388..19920c8be 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml From f19c56e6087423cb2ef20aaa6b597467f4d81e48 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:53:30 +0000 Subject: [PATCH 051/246] feat(api): updates (#1212) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 19920c8be..3cc042fe0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml From fbd968576357e635e541a3475a67fb741f603292 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 05:07:39 +0000 Subject: [PATCH 052/246] release: 4.76.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6258f1481..1cc8c9627 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.75.0" + ".": "4.76.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d91a77c9..e68b45e8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.76.0 (2024-12-05) + +Full Changelog: [v4.75.0...v4.76.0](https://github.com/openai/openai-node/compare/v4.75.0...v4.76.0) + +### Features + +* **api:** 
updates ([#1212](https://github.com/openai/openai-node/issues/1212)) ([e0fedf2](https://github.com/openai/openai-node/commit/e0fedf2c5a91d0c03d8dad6854b366f77eab4923)) + + +### Chores + +* bump openapi url ([#1210](https://github.com/openai/openai-node/issues/1210)) ([3fa95a4](https://github.com/openai/openai-node/commit/3fa95a429d4b2adecce35a7b96b73f6d5e88eeeb)) + ## 4.75.0 (2024-12-03) Full Changelog: [v4.74.0...v4.75.0](https://github.com/openai/openai-node/compare/v4.74.0...v4.75.0) diff --git a/jsr.json b/jsr.json index a394539d1..2c6820969 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.75.0", + "version": "4.76.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 5738871a3..fae301ee7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.75.0", + "version": "4.76.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 82fc52958..b4cc35ca9 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.75.0'; // x-release-please-version +export const VERSION = '4.76.0'; // x-release-please-version From c35555790a7cba54517f43e080d2b2dc6d8ea404 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:32:44 +0000 Subject: [PATCH 053/246] chore(internal): remove unnecessary getRequestClient function (#1215) --- src/core.ts | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/core.ts b/src/core.ts index 0c8e69ffc..803496412 100644 --- a/src/core.ts +++ b/src/core.ts @@ -558,19 +558,13 @@ export abstract class APIClient { const timeout = setTimeout(() => controller.abort(), ms); return ( - this.getRequestClient() - // use undefined this binding; fetch errors if bound to something 
else in browser/cloudflare - .fetch.call(undefined, url, { signal: controller.signal as any, ...options }) - .finally(() => { - clearTimeout(timeout); - }) + // use undefined this binding; fetch errors if bound to something else in browser/cloudflare + this.fetch.call(undefined, url, { signal: controller.signal as any, ...options }).finally(() => { + clearTimeout(timeout); + }) ); } - protected getRequestClient(): RequestClient { - return { fetch: this.fetch }; - } - private shouldRetry(response: Response): boolean { // Note this is not a standard header. const shouldRetryHeader = response.headers.get('x-should-retry'); From fb4820e04a9d579e9a8913dd98cc29cf32a9a7cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:57:28 +0000 Subject: [PATCH 054/246] chore(internal): bump cross-spawn to v7.0.6 (#1217) Note: it is a dev transitive dependency. --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index e139e1fbe..f86935095 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1370,9 +1370,9 @@ create-require@^1.1.0: integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" From 6e8c1d06dcf098ec3dabe1128d29b22eee4f4b58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:58:00 +0000 Subject: [PATCH 055/246] release: 4.76.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1cc8c9627..10a72c4fa 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.0" + ".": "4.76.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e68b45e8a..7ea1f7f7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.76.1 (2024-12-10) + +Full Changelog: [v4.76.0...v4.76.1](https://github.com/openai/openai-node/compare/v4.76.0...v4.76.1) + +### Chores + +* **internal:** bump cross-spawn to v7.0.6 ([#1217](https://github.com/openai/openai-node/issues/1217)) ([c07ad29](https://github.com/openai/openai-node/commit/c07ad298d58e5aeaf816ee3de65fd59bf3fc8b66)) +* **internal:** remove unnecessary getRequestClient function ([#1215](https://github.com/openai/openai-node/issues/1215)) ([bef3925](https://github.com/openai/openai-node/commit/bef392526cd339f45c574bc476649c77be36c612)) + ## 4.76.0 (2024-12-05) Full Changelog: [v4.75.0...v4.76.0](https://github.com/openai/openai-node/compare/v4.75.0...v4.76.0) diff --git a/jsr.json b/jsr.json index 2c6820969..3fa6b07da 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.0", + "version": "4.76.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index fae301ee7..ddffb2c6a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.0", + "version": "4.76.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 
b4cc35ca9..4f8b7224e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.0'; // x-release-please-version +export const VERSION = '4.76.1'; // x-release-please-version From 94ef9d75f20699e80c81fb0defd31dc62d8d3585 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 10 Dec 2024 22:25:12 +0000 Subject: [PATCH 056/246] chore(types): nicer error class types + jsdocs (#1219) --- src/error.ts | 64 ++++++++++++++++++++-------------------------------- 1 file changed, 24 insertions(+), 40 deletions(-) diff --git a/src/error.ts b/src/error.ts index 72b4f7bfd..f3dc57610 100644 --- a/src/error.ts +++ b/src/error.ts @@ -4,10 +4,17 @@ import { castToError, Headers } from './core'; export class OpenAIError extends Error {} -export class APIError extends OpenAIError { - readonly status: number | undefined; - readonly headers: Headers | undefined; - readonly error: Object | undefined; +export class APIError< + TStatus extends number | undefined = number | undefined, + THeaders extends Headers | undefined = Headers | undefined, + TError extends Object | undefined = Object | undefined, +> extends OpenAIError { + /** HTTP status for the response that caused the error */ + readonly status: TStatus; + /** HTTP headers for the response that caused the error */ + readonly headers: THeaders; + /** JSON body of the response that caused the error */ + readonly error: TError; readonly code: string | null | undefined; readonly param: string | null | undefined; @@ -15,19 +22,14 @@ export class APIError extends OpenAIError { readonly request_id: string | null | undefined; - constructor( - status: number | undefined, - error: Object | undefined, - message: string | undefined, - headers: Headers | undefined, - ) { + constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { super(`${APIError.makeMessage(status, error, message)}`); this.status = status; this.headers = headers; this.request_id = headers?.['x-request-id']; 
+ this.error = error; const data = error as Record; - this.error = data; this.code = data?.['code']; this.param = data?.['param']; this.type = data?.['type']; @@ -60,7 +62,7 @@ export class APIError extends OpenAIError { message: string | undefined, headers: Headers | undefined, ): APIError { - if (!status) { + if (!status || !headers) { return new APIConnectionError({ message, cause: castToError(errorResponse) }); } @@ -102,17 +104,13 @@ export class APIError extends OpenAIError { } } -export class APIUserAbortError extends APIError { - override readonly status: undefined = undefined; - +export class APIUserAbortError extends APIError { constructor({ message }: { message?: string } = {}) { super(undefined, undefined, message || 'Request was aborted.', undefined); } } -export class APIConnectionError extends APIError { - override readonly status: undefined = undefined; - +export class APIConnectionError extends APIError { constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { super(undefined, undefined, message || 'Connection error.', undefined); // in some environments the 'cause' property is already declared @@ -127,35 +125,21 @@ export class APIConnectionTimeoutError extends APIConnectionError { } } -export class BadRequestError extends APIError { - override readonly status: 400 = 400; -} +export class BadRequestError extends APIError<400, Headers> {} -export class AuthenticationError extends APIError { - override readonly status: 401 = 401; -} +export class AuthenticationError extends APIError<401, Headers> {} -export class PermissionDeniedError extends APIError { - override readonly status: 403 = 403; -} +export class PermissionDeniedError extends APIError<403, Headers> {} -export class NotFoundError extends APIError { - override readonly status: 404 = 404; -} +export class NotFoundError extends APIError<404, Headers> {} -export class ConflictError extends APIError { - override readonly status: 409 = 409; -} +export 
class ConflictError extends APIError<409, Headers> {} -export class UnprocessableEntityError extends APIError { - override readonly status: 422 = 422; -} +export class UnprocessableEntityError extends APIError<422, Headers> {} -export class RateLimitError extends APIError { - override readonly status: 429 = 429; -} +export class RateLimitError extends APIError<429, Headers> {} -export class InternalServerError extends APIError {} +export class InternalServerError extends APIError {} export class LengthFinishReasonError extends OpenAIError { constructor() { From f13fed4137bbbe2e6e0a83c1820ccdeecb6ddf01 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:17:57 +0000 Subject: [PATCH 057/246] chore(internal): update isAbsoluteURL (#1223) --- src/core.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 803496412..e1a93f272 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1013,8 +1013,8 @@ export const safeJSON = (text: string) => { } }; -// https://stackoverflow.com/a/19709846 -const startsWithSchemeRegexp = new RegExp('^(?:[a-z]+:)?//', 'i'); +// https://url.spec.whatwg.org/#url-scheme-string +const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; const isAbsoluteURL = (url: string): boolean => { return startsWithSchemeRegexp.test(url); }; From 6608f957b62a734c93c006bade5e3b0b8b577c4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 05:07:38 +0000 Subject: [PATCH 058/246] release: 4.76.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 10a72c4fa..47a7d26b6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - 
".": "4.76.1" + ".": "4.76.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ea1f7f7f..27946ddea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.76.2 (2024-12-12) + +Full Changelog: [v4.76.1...v4.76.2](https://github.com/openai/openai-node/compare/v4.76.1...v4.76.2) + +### Chores + +* **internal:** update isAbsoluteURL ([#1223](https://github.com/openai/openai-node/issues/1223)) ([e908ed7](https://github.com/openai/openai-node/commit/e908ed759996fb7706baf46d094fc77419423971)) +* **types:** nicer error class types + jsdocs ([#1219](https://github.com/openai/openai-node/issues/1219)) ([576d24c](https://github.com/openai/openai-node/commit/576d24cc4b3d766dfe28a6031bdc24ac1b711655)) + ## 4.76.1 (2024-12-10) Full Changelog: [v4.76.0...v4.76.1](https://github.com/openai/openai-node/compare/v4.76.0...v4.76.1) diff --git a/jsr.json b/jsr.json index 3fa6b07da..101edee15 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.1", + "version": "4.76.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index ddffb2c6a..53b82f070 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.1", + "version": "4.76.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4f8b7224e..7117b1feb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.1'; // x-release-please-version +export const VERSION = '4.76.2'; // x-release-please-version From 28649f8de711c6379edb6b9e656a9ac3bafdf763 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 13 Dec 2024 09:43:39 +0000 Subject: [PATCH 059/246] chore(internal): better ecosystem test debugging --- ecosystem-tests/cli.ts | 4 ++++ package.json | 3 ++- yarn.lock | 25 +++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 1 
deletion(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 4803b47c2..00120e5f9 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -4,6 +4,10 @@ import yargs from 'yargs'; import assert from 'assert'; import path from 'path'; +// @ts-ignore +var SegfaultHandler = require('segfault-handler'); +SegfaultHandler.registerHandler('crash.log'); + const TAR_NAME = 'openai.tgz'; const PACK_FOLDER = '.pack'; const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`; diff --git a/package.json b/package.json index 53b82f070..35873e1c1 100644 --- a/package.json +++ b/package.json @@ -41,11 +41,12 @@ "eslint": "^8.49.0", "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-unused-imports": "^3.0.0", - "iconv-lite": "^0.6.3", "fast-check": "^3.22.0", + "iconv-lite": "^0.6.3", "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", + "segfault-handler": "^1.3.0", "ts-jest": "^29.1.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", diff --git a/yarn.lock b/yarn.lock index f86935095..c0220f984 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1169,6 +1169,13 @@ big-integer@^1.6.44: resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" integrity sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg== +bindings@^1.2.1: + version "1.5.0" + resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" + integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== + dependencies: + file-uri-to-path "1.0.0" + bplist-parser@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.2.0.tgz#43a9d183e5bf9d545200ceac3e712f79ebbe8d0e" @@ -1746,6 +1753,11 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" +file-uri-to-path@1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" + integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== + fill-range@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" @@ -2687,6 +2699,11 @@ ms@^2.0.0, ms@^2.1.3: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +nan@^2.14.0: + version "2.22.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.22.0.tgz#31bc433fc33213c97bad36404bb68063de604de3" + integrity sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw== + natural-compare@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" @@ -3037,6 +3054,14 @@ safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== +segfault-handler@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/segfault-handler/-/segfault-handler-1.3.0.tgz#054bc847832fa14f218ba6a79e42877501c8870e" + integrity sha512-p7kVHo+4uoYkr0jmIiTBthwV5L2qmWtben/KDunDZ834mbos+tY+iO0//HpAJpOFSQZZ+wxKWuRo4DxV02B7Lg== + dependencies: + bindings "^1.2.1" + nan "^2.14.0" + semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" From 3a6bfe41e3b7277032844ff12186d6f0d0f83554 Mon Sep 17 00:00:00 2001 From: Guspan Tanadi 
<36249910+guspan-tanadi@users.noreply.github.com> Date: Fri, 13 Dec 2024 21:27:45 +0700 Subject: [PATCH 060/246] docs(README): fix helpers section links (#1224) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ac9c84a42..b03bcd870 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ main(); ``` Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#events) including event handlers and promises. +[various helpers for your convenience](helpers.md#chat-events) including event handlers and promises. Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` which only returns an async iterable of the chunks in the stream and thus uses less memory @@ -285,12 +285,12 @@ main(); // Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#events). +Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). Note that `runFunctions` was previously available as well, but has been deprecated in favor of `runTools`. Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-wtih-next-js), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). +[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
## File uploads From f361a0c0eb6ae72a902863d6e338f71dc55e416a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:28:16 +0000 Subject: [PATCH 061/246] release: 4.76.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 47a7d26b6..52c31fe71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.2" + ".": "4.76.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 27946ddea..4b6f57fe4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.76.3 (2024-12-13) + +Full Changelog: [v4.76.2...v4.76.3](https://github.com/openai/openai-node/compare/v4.76.2...v4.76.3) + +### Chores + +* **internal:** better ecosystem test debugging ([86fc0a8](https://github.com/openai/openai-node/commit/86fc0a81ede2780d3fcebaabff3d9fa9a36cc9c0)) + + +### Documentation + +* **README:** fix helpers section links ([#1224](https://github.com/openai/openai-node/issues/1224)) ([efbe30a](https://github.com/openai/openai-node/commit/efbe30a156cec1836d3db28f663066b33be57ba2)) + ## 4.76.2 (2024-12-12) Full Changelog: [v4.76.1...v4.76.2](https://github.com/openai/openai-node/compare/v4.76.1...v4.76.2) diff --git a/jsr.json b/jsr.json index 101edee15..ef9ce6848 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.2", + "version": "4.76.3", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 35873e1c1..47f363ba1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.2", + "version": "4.76.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", 
diff --git a/src/version.ts b/src/version.ts index 7117b1feb..01cd56405 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.2'; // x-release-please-version +export const VERSION = '4.76.3'; // x-release-please-version From bd1a82dc8f867c271fc6f226c7d98f8de439ab7c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:26:31 +0000 Subject: [PATCH 062/246] chore(internal): fix some typos (#1227) --- src/core.ts | 4 ++-- tests/index.test.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index e1a93f272..68f1e676a 100644 --- a/src/core.ts +++ b/src/core.ts @@ -198,7 +198,7 @@ export abstract class APIClient { maxRetries = 2, timeout = 600000, // 10 minutes httpAgent, - fetch: overridenFetch, + fetch: overriddenFetch, }: { baseURL: string; maxRetries?: number | undefined; @@ -211,7 +211,7 @@ export abstract class APIClient { this.timeout = validatePositiveInteger('timeout', timeout); this.httpAgent = httpAgent; - this.fetch = overridenFetch ?? fetch; + this.fetch = overriddenFetch ?? 
fetch; } protected authHeaders(opts: FinalRequestOptions): Headers { diff --git a/tests/index.test.ts b/tests/index.test.ts index f39571121..bf113e7bb 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -177,7 +177,7 @@ describe('instantiate client', () => { expect(client.apiKey).toBe('My API Key'); }); - test('with overriden environment variable arguments', () => { + test('with overridden environment variable arguments', () => { // set options via env var process.env['OPENAI_API_KEY'] = 'another My API Key'; const client = new OpenAI({ apiKey: 'My API Key' }); From 4984aaccbddcd05349c0c47c608b387b5b1f7ef6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:56:30 +0000 Subject: [PATCH 063/246] feat(api): new o1 and GPT-4o models + preference fine-tuning (#1229) learn more here: https://platform.openai.com/docs/changelog --- .stats.yml | 2 +- api.md | 2 + src/index.ts | 4 + src/resources/chat/chat.ts | 11 +- src/resources/chat/completions.ts | 96 +++++-- src/resources/chat/index.ts | 2 + src/resources/fine-tuning/jobs/jobs.ts | 270 +++++++++++++++++- tests/api-resources/chat/completions.test.ts | 5 +- .../fine-tuning/jobs/jobs.test.ts | 14 + 9 files changed, 372 insertions(+), 34 deletions(-) diff --git a/.stats.yml b/.stats.yml index 3cc042fe0..d4d7d3c40 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml diff --git a/api.md b/api.md index 465730de8..54bcf08d7 100644 --- a/api.md +++ b/api.md @@ -41,6 +41,7 @@ Types: - ChatCompletionContentPartInputAudio - ChatCompletionContentPartRefusal - ChatCompletionContentPartText +- 
ChatCompletionDeveloperMessageParam - ChatCompletionFunctionCallOption - ChatCompletionFunctionMessageParam - ChatCompletionMessage @@ -49,6 +50,7 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent +- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStreamOptions - ChatCompletionSystemMessageParam diff --git a/src/index.ts b/src/index.ts index 58d7410e4..2320850fb 100644 --- a/src/index.ts +++ b/src/index.ts @@ -80,6 +80,7 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, @@ -88,6 +89,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -353,6 +355,7 @@ export declare namespace OpenAI { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -361,6 +364,7 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type 
ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 09cd3d123..2230b19bd 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -16,6 +16,7 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, @@ -24,6 +25,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -44,6 +46,8 @@ export class Chat extends APIResource { } export type ChatModel = + | 'o1' + | 'o1-2024-12-17' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o1-mini' @@ -52,10 +56,11 @@ export type ChatModel = | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -96,6 +101,7 @@ export declare namespace Chat { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -104,6 +110,7 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type 
ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 8e9a4385e..31f5814cb 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -15,6 +15,12 @@ export class Completions extends APIResource { * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and * [audio](https://platform.openai.com/docs/guides/audio) guides. + * + * Parameter support can differ depending on the model used to generate the + * response, particularly for newer reasoning models. Parameters that are only + * supported for reasoning models are noted below. For the current state of + * unsupported parameters in reasoning models, + * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -135,6 +141,9 @@ export namespace ChatCompletion { } } +/** + * Messages sent by the model in response to user messages. + */ export interface ChatCompletionAssistantMessageParam { /** * The role of the messages author, in this case `assistant`. @@ -530,6 +539,29 @@ export interface ChatCompletionContentPartText { type: 'text'; } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ +export interface ChatCompletionDeveloperMessageParam { + /** + * The contents of the developer message. 
+ */ + content: string | Array; + + /** + * The role of the messages author, in this case `developer`. + */ + role: 'developer'; + + /** + * An optional name for the participant. Provides the model information to + * differentiate between participants of the same role. + */ + name?: string; +} + /** * Specifying a particular function via `{"name": "my_function"}` forces the model * to call that function. @@ -620,7 +652,13 @@ export namespace ChatCompletionMessage { } } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ export type ChatCompletionMessageParam = + | ChatCompletionDeveloperMessageParam | ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam @@ -707,6 +745,16 @@ export interface ChatCompletionPredictionContent { type: 'content'; } +/** + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; + /** * The role of the author of a message */ @@ -725,6 +773,11 @@ export interface ChatCompletionStreamOptions { include_usage?: boolean; } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, use `developer` messages + * for this purpose instead. + */ export interface ChatCompletionSystemMessageParam { /** * The contents of the system message. @@ -835,6 +888,10 @@ export interface ChatCompletionToolMessageParam { tool_call_id: string; } +/** + * Messages sent by an end user, containing prompts or additional context + * information. 
+ */ export interface ChatCompletionUserMessageParam { /** * The contents of the user message. @@ -891,20 +948,22 @@ export interface ChatCompletionCreateParamsBase { * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. - * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; /** * Deprecated in favor of `tool_choice`. * - * Controls which (if any) function is called by the model. `none` means the model - * will not call a function and instead generates a message. `auto` means the model - * can pick between generating a message or calling a function. Specifying a - * particular function via `{"name": "my_function"}` forces the model to call that + * Controls which (if any) function is called by the model. + * + * `none` means the model will not call a function and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling a * function. * + * Specifying a particular function via `{"name": "my_function"}` forces the model + * to call that function. + * * `none` is the default when no functions are present. `auto` is the default if * functions are present. */ @@ -998,17 +1057,21 @@ export interface ChatCompletionCreateParamsBase { * Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. - * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; /** - * An object specifying the format that the model must output. 
Compatible with - * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - * [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: ChatCompletionReasoningEffort; + + /** + * An object specifying the format that the model must output. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured * Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -1088,9 +1151,8 @@ export interface ChatCompletionCreateParamsBase { /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will * make the output more random, while lower values like 0.2 will make it more - * focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. 
*/ temperature?: number | null; @@ -1223,6 +1285,7 @@ export declare namespace Completions { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -1231,6 +1294,7 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 262bf75a2..c3be19402 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -13,6 +13,7 @@ export { type ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal, type ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam, type ChatCompletionMessage, @@ -21,6 +22,7 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 0c320e028..44dd011aa 100644 --- 
a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -127,9 +127,8 @@ export interface FineTuningJob { finished_at: number | null; /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ hyperparameters: FineTuningJob.Hyperparameters; @@ -195,6 +194,11 @@ export interface FineTuningJob { * A list of integrations to enable for this fine-tuning job. */ integrations?: Array | null; + + /** + * The method used for fine-tuning. + */ + method?: FineTuningJob.Method; } export namespace FineTuningJob { @@ -221,18 +225,125 @@ export namespace FineTuningJob { } /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + /** * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. "auto" decides the optimal number of epochs based - * on the size of the dataset. If setting the number manually, we support any - * number between 1 and 50 epochs. + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * Configuration for the DPO fine-tuning method. 
+ */ + dpo?: Method.Dpo; + + /** + * Configuration for the supervised fine-tuning method. */ - n_epochs: 'auto' | number; + supervised?: Method.Supervised; + + /** + * The type of method. Is either `supervised` or `dpo`. + */ + type?: 'supervised' | 'dpo'; + } + + export namespace Method { + /** + * Configuration for the DPO fine-tuning method. + */ + export interface Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Dpo.Hyperparameters; + } + + export namespace Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } + + /** + * Configuration for the supervised fine-tuning method. + */ + export interface Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Supervised.Hyperparameters; + } + + export namespace Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. 
+ */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } } } @@ -240,15 +351,40 @@ export namespace FineTuningJob { * Fine-tuning job event object */ export interface FineTuningJobEvent { + /** + * The object identifier. + */ id: string; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was created. + */ created_at: number; + /** + * The log level of the event. + */ level: 'info' | 'warn' | 'error'; + /** + * The message of the event. + */ message: string; + /** + * The object type, which is always "fine_tuning.job.event". + */ object: 'fine_tuning.job.event'; + + /** + * The data associated with the event. + */ + data?: unknown; + + /** + * The type of event. + */ + type?: 'message' | 'metrics'; } export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; @@ -318,8 +454,10 @@ export interface JobCreateParams { * your file with the purpose `fine-tune`. * * The contents of the file should differ depending on if the model uses the - * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * format, or if the fine-tuning method uses the + * [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) * format. * * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) @@ -328,7 +466,8 @@ export interface JobCreateParams { training_file: string; /** - * The hyperparameters used for the fine-tuning job. + * The hyperparameters used for the fine-tuning job. This value is now deprecated + * in favor of `method`, and should be passed in under the `method` parameter. 
*/ hyperparameters?: JobCreateParams.Hyperparameters; @@ -337,6 +476,11 @@ export interface JobCreateParams { */ integrations?: Array | null; + /** + * The method used for fine-tuning. + */ + method?: JobCreateParams.Method; + /** * The seed controls the reproducibility of the job. Passing in the same seed and * job parameters should produce the same results, but may differ in rare cases. If @@ -372,7 +516,9 @@ export interface JobCreateParams { export namespace JobCreateParams { /** - * The hyperparameters used for the fine-tuning job. + * @deprecated: The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ export interface Hyperparameters { /** @@ -444,6 +590,104 @@ export namespace JobCreateParams { tags?: Array; } } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * Configuration for the DPO fine-tuning method. + */ + dpo?: Method.Dpo; + + /** + * Configuration for the supervised fine-tuning method. + */ + supervised?: Method.Supervised; + + /** + * The type of method. Is either `supervised` or `dpo`. + */ + type?: 'supervised' | 'dpo'; + } + + export namespace Method { + /** + * Configuration for the DPO fine-tuning method. + */ + export interface Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Dpo.Hyperparameters; + } + + export namespace Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. 
A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } + + /** + * Configuration for the supervised fine-tuning method. + */ + export interface Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Supervised.Hyperparameters; + } + + export namespace Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. 
+ */ + n_epochs?: 'auto' | number; + } + } + } } export interface JobListParams extends CursorPageParams {} diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 5dcbf9ad6..dfc09f69b 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -11,7 +11,7 @@ const client = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { const responsePromise = client.chat.completions.create({ - messages: [{ content: 'string', role: 'system' }], + messages: [{ content: 'string', role: 'developer' }], model: 'gpt-4o', }); const rawResponse = await responsePromise.asResponse(); @@ -25,7 +25,7 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await client.chat.completions.create({ - messages: [{ content: 'string', role: 'system', name: 'name' }], + messages: [{ content: 'string', role: 'developer', name: 'name' }], model: 'gpt-4o', audio: { format: 'wav', voice: 'alloy' }, frequency_penalty: -2, @@ -41,6 +41,7 @@ describe('resource completions', () => { parallel_tool_calls: true, prediction: { content: 'string', type: 'content' }, presence_penalty: -2, + reasoning_effort: 'low', response_format: { type: 'text' }, seed: -9007199254740991, service_tier: 'auto', diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 0ab09768a..4de83a8b7 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -34,6 +34,20 @@ describe('resource jobs', () => { wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, ], + method: { + dpo: { + hyperparameters: { + batch_size: 'auto', + beta: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', + }, + }, + supervised: { + hyperparameters: { 
batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, + }, + type: 'supervised', + }, seed: 42, suffix: 'x', validation_file: 'file-abc123', From 13fa61db5e3407be684868db488c46d95f6805bf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:05:03 +0000 Subject: [PATCH 064/246] chore(internal): spec update (#1230) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d4d7d3c40..7b5235e3c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml From 21c3ed6db585433880c2462b3c62abcbe7ec6903 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:05:30 +0000 Subject: [PATCH 065/246] release: 4.77.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 52c31fe71..6b843f931 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.3" + ".": "4.77.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b6f57fe4..d33ce4c1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.77.0 (2024-12-17) + +Full Changelog: [v4.76.3...v4.77.0](https://github.com/openai/openai-node/compare/v4.76.3...v4.77.0) + +### Features + +* **api:** new o1 and GPT-4o models + preference fine-tuning ([#1229](https://github.com/openai/openai-node/issues/1229)) 
([2e872d4](https://github.com/openai/openai-node/commit/2e872d4ac3717ab8f61741efffb7a31acd798338)) + + +### Chores + +* **internal:** fix some typos ([#1227](https://github.com/openai/openai-node/issues/1227)) ([d51fcfe](https://github.com/openai/openai-node/commit/d51fcfe3a66550a684eeeb0e6f17e1d9825cdf78)) +* **internal:** spec update ([#1230](https://github.com/openai/openai-node/issues/1230)) ([ed2b61d](https://github.com/openai/openai-node/commit/ed2b61d32703b64d9f91223bc02627a607f60483)) + ## 4.76.3 (2024-12-13) Full Changelog: [v4.76.2...v4.76.3](https://github.com/openai/openai-node/compare/v4.76.2...v4.76.3) diff --git a/jsr.json b/jsr.json index ef9ce6848..d76a2040e 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.3", + "version": "4.77.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 47f363ba1..54633aa5d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.3", + "version": "4.77.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 01cd56405..fdf4e5224 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.3'; // x-release-please-version +export const VERSION = '4.77.0'; // x-release-please-version From d70f6e835be4cef980e8e4026ec709177d3d3931 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 22:16:46 +0000 Subject: [PATCH 066/246] chore(internal): spec update (#1231) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 7b5235e3c..248cc366d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml From 0f715f281715da744e01c2c08932008d0cfde614 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 22:07:12 +0000 Subject: [PATCH 067/246] fix(client): normalize method (#1235) --- src/core.ts | 12 +++++++++++- tests/index.test.ts | 13 +++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index 68f1e676a..972cceaec 100644 --- a/src/core.ts +++ b/src/core.ts @@ -557,9 +557,19 @@ export abstract class APIClient { const timeout = setTimeout(() => controller.abort(), ms); + const fetchOptions = { + signal: controller.signal as any, + ...options, + }; + if (fetchOptions.method) { + // Custom methods like 'patch' need to be uppercased + // See https://github.com/nodejs/undici/issues/2294 + fetchOptions.method = fetchOptions.method.toUpperCase(); + } + return ( // use undefined this binding; fetch errors if bound to something else in browser/cloudflare - this.fetch.call(undefined, url, { signal: controller.signal as any, ...options }).finally(() => { + this.fetch.call(undefined, url, fetchOptions).finally(() => { clearTimeout(timeout); }) ); diff --git a/tests/index.test.ts b/tests/index.test.ts index bf113e7bb..a6f0040a4 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -122,6 +122,19 @@ describe('instantiate client', () => { expect(spy).toHaveBeenCalledTimes(1); }); + test('normalized method', async () => { + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + capturedRequest = init; + return new Response(JSON.stringify({}), { headers: { 'Content-Type': 'application/json' } 
}); + }; + + const client = new OpenAI({ baseURL: 'http://localhost:5000/', apiKey: 'My API Key', fetch: testFetch }); + + await client.patch('/foo'); + expect(capturedRequest?.method).toEqual('PATCH'); + }); + describe('baseUrl', () => { test('trailing slash', () => { const client = new OpenAI({ baseURL: 'http://localhost:5000/custom/path/', apiKey: 'My API Key' }); From 4df92af7cace12c2134fbfb3db1ed5887dec0a4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:54:17 +0000 Subject: [PATCH 068/246] docs: minor formatting changes (#1236) --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e8bbc1b07..dde09d52d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ ## Setting up the environment -This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable). +This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install). Other package managers may work but are not officially supported for development. To set up the repository, run: @@ -29,10 +29,10 @@ All files in the `examples/` directory are not modified by the generator and can … ``` -``` -chmod +x examples/.ts +```sh +$ chmod +x examples/.ts # run the example against your api -yarn tsn -T examples/.ts +$ yarn tsn -T examples/.ts ``` ## Using the repository from source From b6e4d947c69d255cd332bf247a2faedf578438c4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 20 Dec 2024 21:28:36 +0000 Subject: [PATCH 069/246] docs(readme): add alpha callout --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b03bcd870..c926688f0 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,9 @@ +> [!IMPORTANT] +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. 
+> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + # OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) From 3fdc7d4f67a6ceea51723684dcc0bc1895088259 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 05:06:15 +0000 Subject: [PATCH 070/246] release: 4.77.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6b843f931..40491ea3b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.0" + ".": "4.77.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d33ce4c1a..e2ed8756c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.77.1 (2024-12-21) + +Full Changelog: [v4.77.0...v4.77.1](https://github.com/openai/openai-node/compare/v4.77.0...v4.77.1) + +### Bug Fixes + +* **client:** normalize method ([#1235](https://github.com/openai/openai-node/issues/1235)) ([4a213da](https://github.com/openai/openai-node/commit/4a213dad6f2104dc02a75724acc62134d25db472)) + + +### Chores + +* **internal:** spec update ([#1231](https://github.com/openai/openai-node/issues/1231)) ([a97ea73](https://github.com/openai/openai-node/commit/a97ea73cafcb56e94be7ff691c4022da575cf60e)) + + +### Documentation + +* minor formatting changes ([#1236](https://github.com/openai/openai-node/issues/1236)) 
([6387968](https://github.com/openai/openai-node/commit/63879681ccaca3dc1e17b27464e2f830b8f63b4f)) +* **readme:** add alpha callout ([f2eff37](https://github.com/openai/openai-node/commit/f2eff3780e1216f7f420f7b86d47f4e21986b10e)) + ## 4.77.0 (2024-12-17) Full Changelog: [v4.76.3...v4.77.0](https://github.com/openai/openai-node/compare/v4.76.3...v4.77.0) diff --git a/jsr.json b/jsr.json index d76a2040e..f80d0a575 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.0", + "version": "4.77.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 54633aa5d..44030acd3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.0", + "version": "4.77.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index fdf4e5224..a7b84d0c2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.0'; // x-release-please-version +export const VERSION = '4.77.1'; // x-release-please-version From c0ae6fc24957b1c3962b75f1d17c0d85ea2b298c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 01:44:21 +0000 Subject: [PATCH 071/246] chore: bump license year (#1246) --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 621a6becf..f011417af 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2024 OpenAI + Copyright 2025 OpenAI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From 9acdf8a3c44a4cce785b9dfa9efd32f8c47fa6d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 05:06:49 +0000 Subject: [PATCH 072/246] release: 4.77.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 40491ea3b..c5f60a579 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.1" + ".": "4.77.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2ed8756c..d5143492e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.2 (2025-01-02) + +Full Changelog: [v4.77.1...v4.77.2](https://github.com/openai/openai-node/compare/v4.77.1...v4.77.2) + +### Chores + +* bump license year ([#1246](https://github.com/openai/openai-node/issues/1246)) ([13197c1](https://github.com/openai/openai-node/commit/13197c1698f492529bd00b62d95f83c039ef0ac7)) + ## 4.77.1 (2024-12-21) Full Changelog: [v4.77.0...v4.77.1](https://github.com/openai/openai-node/compare/v4.77.0...v4.77.1) diff --git a/jsr.json b/jsr.json index f80d0a575..6e735f0b0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.1", + "version": "4.77.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 44030acd3..947aad8d4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.1", + "version": "4.77.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index a7b84d0c2..e1984f01e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.1'; // x-release-please-version +export const 
VERSION = '4.77.2'; // x-release-please-version From db16121d2db5a3104c2b9d85c6b7b3281f6f6299 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:57:05 +0000 Subject: [PATCH 073/246] chore(api): bump spec version (#1248) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 248cc366d..d223c8f1f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml From f4066e1af907586946a5e6befee9459268425680 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:57:37 +0000 Subject: [PATCH 074/246] release: 4.77.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c5f60a579..e98ace9d7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.2" + ".": "4.77.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d5143492e..1f928b366 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.3 (2025-01-03) + +Full Changelog: [v4.77.2...v4.77.3](https://github.com/openai/openai-node/compare/v4.77.2...v4.77.3) + +### Chores + +* **api:** bump spec version ([#1248](https://github.com/openai/openai-node/issues/1248)) ([37b3df9](https://github.com/openai/openai-node/commit/37b3df9ac6af76fea6eace8307aab9f0565e5660)) + ## 4.77.2 (2025-01-02) Full Changelog: 
[v4.77.1...v4.77.2](https://github.com/openai/openai-node/compare/v4.77.1...v4.77.2) diff --git a/jsr.json b/jsr.json index 6e735f0b0..57eb55bf8 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.2", + "version": "4.77.3", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 947aad8d4..2ad833206 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.2", + "version": "4.77.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e1984f01e..81ee8f0d6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.2'; // x-release-please-version +export const VERSION = '4.77.3'; // x-release-please-version From 017d6010d40e84ea390293a46485beefcc8d386c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:22:26 +0000 Subject: [PATCH 075/246] docs(readme): fix misplaced period (#1252) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c926688f0..3039857a1 100644 --- a/README.md +++ b/README.md @@ -631,7 +631,7 @@ await client.models.list({ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: 1. Changes that only affect static types, without breaking runtime behavior. -2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes to library internals which are technically public but not intended or documented for external use. 
_(Please open a GitHub issue to let us know if you are relying on such internals.)_ 3. Changes that we do not expect to impact the vast majority of users in practice. We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. From bb6ac193b7d9d45155d7e7bc93d40ea0a79645cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:23:01 +0000 Subject: [PATCH 076/246] release: 4.77.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e98ace9d7..e66c326a9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.3" + ".": "4.77.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f928b366..7a811f188 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.4 (2025-01-08) + +Full Changelog: [v4.77.3...v4.77.4](https://github.com/openai/openai-node/compare/v4.77.3...v4.77.4) + +### Documentation + +* **readme:** fix misplaced period ([#1252](https://github.com/openai/openai-node/issues/1252)) ([c2fe465](https://github.com/openai/openai-node/commit/c2fe46522d59d1611ba8bb2b7e070f9be7264df0)) + ## 4.77.3 (2025-01-03) Full Changelog: [v4.77.2...v4.77.3](https://github.com/openai/openai-node/compare/v4.77.2...v4.77.3) diff --git a/jsr.json b/jsr.json index 57eb55bf8..da442da31 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.3", + "version": "4.77.4", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 2ad833206..453859b6b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.3", + "version": "4.77.4", "description": "The official 
TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 81ee8f0d6..7f6adc9bc 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.3'; // x-release-please-version +export const VERSION = '4.77.4'; // x-release-please-version From d3736066a0a277ba544617cbf8d2ea057a9f0ecf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:58:16 +0000 Subject: [PATCH 077/246] feat(client): add realtime types (#1254) note this just defines types, there is no websocket interface provided yet --- .stats.yml | 4 +- api.md | 60 + src/resources/beta/beta.ts | 6 + src/resources/beta/index.ts | 1 + src/resources/beta/realtime/index.ts | 4 + src/resources/beta/realtime/realtime.ts | 1904 +++++++++++++++++ src/resources/beta/realtime/sessions.ts | 546 +++++ .../beta/realtime/sessions.test.ts | 45 + 8 files changed, 2568 insertions(+), 2 deletions(-) create mode 100644 src/resources/beta/realtime/index.ts create mode 100644 src/resources/beta/realtime/realtime.ts create mode 100644 src/resources/beta/realtime/sessions.ts create mode 100644 tests/api-resources/beta/realtime/sessions.test.ts diff --git a/.stats.yml b/.stats.yml index d223c8f1f..9600edae3 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml +configured_endpoints: 69 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml diff --git a/api.md b/api.md index 54bcf08d7..a885628a3 100644 --- a/api.md +++ b/api.md @@ -213,6 +213,66 @@ Methods: # Beta +## Realtime + +Types: + +- ConversationCreatedEvent +- ConversationItem +- ConversationItemContent +- 
ConversationItemCreateEvent +- ConversationItemCreatedEvent +- ConversationItemDeleteEvent +- ConversationItemDeletedEvent +- ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemTruncateEvent +- ConversationItemTruncatedEvent +- ErrorEvent +- InputAudioBufferAppendEvent +- InputAudioBufferClearEvent +- InputAudioBufferClearedEvent +- InputAudioBufferCommitEvent +- InputAudioBufferCommittedEvent +- InputAudioBufferSpeechStartedEvent +- InputAudioBufferSpeechStoppedEvent +- RateLimitsUpdatedEvent +- RealtimeClientEvent +- RealtimeResponse +- RealtimeResponseStatus +- RealtimeResponseUsage +- RealtimeServerEvent +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCancelEvent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreateEvent +- ResponseCreatedEvent +- ResponseDoneEvent +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- SessionCreatedEvent +- SessionUpdateEvent +- SessionUpdatedEvent + +### Sessions + +Types: + +- Session +- SessionCreateResponse + +Methods: + +- client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse + ## VectorStores Types: diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index b904abe4a..ccd043243 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -21,6 +21,8 @@ import { RunStreamEvent, ThreadStreamEvent, } from './assistants'; +import * as RealtimeAPI from './realtime/realtime'; +import { Realtime } from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; import { AssistantResponseFormatOption, @@ -58,12 +60,14 @@ import { import { Chat } from './chat/chat'; export class Beta extends APIResource { + realtime: 
RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } +Beta.Realtime = Realtime; Beta.VectorStores = VectorStores; Beta.VectorStoresPage = VectorStoresPage; Beta.Assistants = Assistants; @@ -71,6 +75,8 @@ Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; export declare namespace Beta { + export { Realtime as Realtime }; + export { VectorStores as VectorStores, type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index d7111288f..aa2e52d4c 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -19,6 +19,7 @@ export { type AssistantListParams, } from './assistants'; export { Beta } from './beta'; +export { Realtime } from './realtime/index'; export { Chat } from './chat/index'; export { Threads, diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts new file mode 100644 index 000000000..66c3ecaae --- /dev/null +++ b/src/resources/beta/realtime/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Realtime } from './realtime'; +export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts new file mode 100644 index 000000000..5de06917a --- /dev/null +++ b/src/resources/beta/realtime/realtime.ts @@ -0,0 +1,1904 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as RealtimeAPI from './realtime'; +import * as SessionsAPI from './sessions'; +import { + Session as SessionsAPISession, + SessionCreateParams, + SessionCreateResponse, + Sessions, +} from './sessions'; + +export class Realtime extends APIResource { + sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); +} + +/** + * Returned when a conversation is created. Emitted right after session creation. + */ +export interface ConversationCreatedEvent { + /** + * The conversation resource. + */ + conversation: ConversationCreatedEvent.Conversation; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `conversation.created`. + */ + type: 'conversation.created'; +} + +export namespace ConversationCreatedEvent { + /** + * The conversation resource. + */ + export interface Conversation { + /** + * The unique ID of the conversation. + */ + id?: string; + + /** + * The object type, must be `realtime.conversation`. + */ + object?: 'realtime.conversation'; + } +} + +/** + * The item to add to the conversation. + */ +export interface ConversationItem { + /** + * The unique ID of the item, this can be generated by the client to help manage + * server-side context, but is not required because the server will generate one if + * not provided. + */ + id?: string; + + /** + * The arguments of the function call (for `function_call` items). + */ + arguments?: string; + + /** + * The ID of the function call (for `function_call` and `function_call_output` + * items). If passed on a `function_call_output` item, the server will check that a + * `function_call` item with the same ID exists in the conversation history. + */ + call_id?: string; + + /** + * The content of the message, applicable for `message` items. 
+ * + * - Message items of role `system` support only `input_text` content + * - Message items of role `user` support `input_text` and `input_audio` content + * - Message items of role `assistant` support `text` content. + */ + content?: Array; + + /** + * The name of the function being called (for `function_call` items). + */ + name?: string; + + /** + * Identifier for the API object being returned - always `realtime.item`. + */ + object?: 'realtime.item'; + + /** + * The output of the function call (for `function_call_output` items). + */ + output?: string; + + /** + * The role of the message sender (`user`, `assistant`, `system`), only applicable + * for `message` items. + */ + role?: 'user' | 'assistant' | 'system'; + + /** + * The status of the item (`completed`, `incomplete`). These have no effect on the + * conversation, but are accepted for consistency with the + * `conversation.item.created` event. + */ + status?: 'completed' | 'incomplete'; + + /** + * The type of the item (`message`, `function_call`, `function_call_output`). + */ + type?: 'message' | 'function_call' | 'function_call_output'; +} + +export interface ConversationItemContent { + /** + * ID of a previous conversation item to reference (for `item_reference` content + * types in `response.create` events). These can reference both client and server + * created items. + */ + id?: string; + + /** + * Base64-encoded audio bytes, used for `input_audio` content type. + */ + audio?: string; + + /** + * The text content, used for `input_text` and `text` content types. + */ + text?: string; + + /** + * The transcript of the audio, used for `input_audio` content type. + */ + transcript?: string; + + /** + * The content type (`input_text`, `input_audio`, `item_reference`, `text`). + */ + type?: 'input_text' | 'input_audio' | 'item_reference' | 'text'; +} + +/** + * Add a new Item to the Conversation's context, including messages, function + * calls, and function call responses. 
This event can be used both to populate a + * "history" of the conversation and to add new items mid-stream, but has the + * current limitation that it cannot populate assistant audio messages. + * + * If successful, the server will respond with a `conversation.item.created` event, + * otherwise an `error` event will be sent. + */ +export interface ConversationItemCreateEvent { + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The event type, must be `conversation.item.create`. + */ + type: 'conversation.item.create'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * The ID of the preceding item after which the new item will be inserted. If not + * set, the new item will be appended to the end of the conversation. If set, it + * allows an item to be inserted mid-conversation. If the ID cannot be found, an + * error will be returned and the item will not be added. + */ + previous_item_id?: string; +} + +/** + * Returned when a conversation item is created. There are several scenarios that + * produce this event: + * + * - The server is generating a Response, which if successful will produce either + * one or two Items, which will be of type `message` (role `assistant`) or type + * `function_call`. + * - The input audio buffer has been committed, either by the client or the server + * (in `server_vad` mode). The server will take the content of the input audio + * buffer and add it to a new user message Item. + * - The client has sent a `conversation.item.create` event to add a new Item to + * the Conversation. + */ +export interface ConversationItemCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The ID of the preceding item in the Conversation context, allows the client to + * understand the order of the conversation. 
+ */ + previous_item_id: string; + + /** + * The event type, must be `conversation.item.created`. + */ + type: 'conversation.item.created'; +} + +/** + * Send this event when you want to remove any item from the conversation history. + * The server will respond with a `conversation.item.deleted` event, unless the + * item does not exist in the conversation history, in which case the server will + * respond with an error. + */ +export interface ConversationItemDeleteEvent { + /** + * The ID of the item to delete. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.delete`. + */ + type: 'conversation.item.delete'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when an item in the conversation is deleted by the client with a + * `conversation.item.delete` event. This event is used to synchronize the server's + * understanding of the conversation history with the client's view. + */ +export interface ConversationItemDeletedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item that was deleted. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.deleted`. + */ + type: 'conversation.item.deleted'; +} + +/** + * This event is the output of audio transcription for user audio written to the + * user audio buffer. Transcription begins when the input audio buffer is committed + * by the client or server (in `server_vad` mode). Transcription runs + * asynchronously with Response creation, so this event may come before or after + * the Response events. + * + * Realtime API models accept audio natively, and thus input transcription is a + * separate process run on a separate ASR (Automatic Speech Recognition) model, + * currently always `whisper-1`. Thus the transcript may diverge somewhat from the + * model's interpretation, and should be treated as a rough guide. 
+ */ +export interface ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item containing the audio. + */ + item_id: string; + + /** + * The transcribed text. + */ + transcript: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.completed`. + */ + type: 'conversation.item.input_audio_transcription.completed'; +} + +/** + * Returned when input audio transcription is configured, and a transcription + * request for a user message failed. These events are separate from other `error` + * events so that the client can identify the related Item. + */ +export interface ConversationItemInputAudioTranscriptionFailedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * Details of the transcription error. + */ + error: ConversationItemInputAudioTranscriptionFailedEvent.Error; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.failed`. + */ + type: 'conversation.item.input_audio_transcription.failed'; +} + +export namespace ConversationItemInputAudioTranscriptionFailedEvent { + /** + * Details of the transcription error. + */ + export interface Error { + /** + * Error code, if any. + */ + code?: string; + + /** + * A human-readable error message. + */ + message?: string; + + /** + * Parameter related to the error, if any. + */ + param?: string; + + /** + * The type of error. + */ + type?: string; + } +} + +/** + * Send this event to truncate a previous assistant message’s audio. 
The server + * will produce audio faster than realtime, so this event is useful when the user + * interrupts to truncate audio that has already been sent to the client but not + * yet played. This will synchronize the server's understanding of the audio with + * the client's playback. + * + * Truncating audio will delete the server-side text transcript to ensure there is + * not text in the context that hasn't been heard by the user. + * + * If successful, the server will respond with a `conversation.item.truncated` + * event. + */ +export interface ConversationItemTruncateEvent { + /** + * Inclusive duration up to which audio is truncated, in milliseconds. If the + * audio_end_ms is greater than the actual audio duration, the server will respond + * with an error. + */ + audio_end_ms: number; + + /** + * The index of the content part to truncate. Set this to 0. + */ + content_index: number; + + /** + * The ID of the assistant message item to truncate. Only assistant message items + * can be truncated. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.truncate`. + */ + type: 'conversation.item.truncate'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when an earlier assistant audio message item is truncated by the client + * with a `conversation.item.truncate` event. This event is used to synchronize the + * server's understanding of the audio with the client's playback. + * + * This action will truncate the audio and remove the server-side text transcript + * to ensure there is no text in the context that hasn't been heard by the user. + */ +export interface ConversationItemTruncatedEvent { + /** + * The duration up to which the audio was truncated, in milliseconds. + */ + audio_end_ms: number; + + /** + * The index of the content part that was truncated. + */ + content_index: number; + + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * The ID of the assistant message item that was truncated. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.truncated`. + */ + type: 'conversation.item.truncated'; +} + +/** + * Returned when an error occurs, which could be a client problem or a server + * problem. Most errors are recoverable and the session will stay open, we + * recommend to implementors to monitor and log error messages by default. + */ +export interface ErrorEvent { + /** + * Details of the error. + */ + error: ErrorEvent.Error; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `error`. + */ + type: 'error'; +} + +export namespace ErrorEvent { + /** + * Details of the error. + */ + export interface Error { + /** + * A human-readable error message. + */ + message: string; + + /** + * The type of error (e.g., "invalid_request_error", "server_error"). + */ + type: string; + + /** + * Error code, if any. + */ + code?: string | null; + + /** + * The event_id of the client event that caused the error, if applicable. + */ + event_id?: string | null; + + /** + * Parameter related to the error, if any. + */ + param?: string | null; + } +} + +/** + * Send this event to append audio bytes to the input audio buffer. The audio + * buffer is temporary storage you can write to and later commit. In Server VAD + * mode, the audio buffer is used to detect speech and the server will decide when + * to commit. When Server VAD is disabled, you must commit the audio buffer + * manually. + * + * The client may choose how much audio to place in each event up to a maximum of + * 15 MiB, for example streaming smaller chunks from the client may allow the VAD + * to be more responsive. Unlike made other client events, the server will not send + * a confirmation response to this event. + */ +export interface InputAudioBufferAppendEvent { + /** + * Base64-encoded audio bytes. 
This must be in the format specified by the + * `input_audio_format` field in the session configuration. + */ + audio: string; + + /** + * The event type, must be `input_audio_buffer.append`. + */ + type: 'input_audio_buffer.append'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Send this event to clear the audio bytes in the buffer. The server will respond + * with an `input_audio_buffer.cleared` event. + */ +export interface InputAudioBufferClearEvent { + /** + * The event type, must be `input_audio_buffer.clear`. + */ + type: 'input_audio_buffer.clear'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when the input audio buffer is cleared by the client with a + * `input_audio_buffer.clear` event. + */ +export interface InputAudioBufferClearedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `input_audio_buffer.cleared`. + */ + type: 'input_audio_buffer.cleared'; +} + +/** + * Send this event to commit the user input audio buffer, which will create a new + * user message item in the conversation. This event will produce an error if the + * input audio buffer is empty. When in Server VAD mode, the client does not need + * to send this event, the server will commit the audio buffer automatically. + * + * Committing the input audio buffer will trigger input audio transcription (if + * enabled in session configuration), but it will not create a response from the + * model. The server will respond with an `input_audio_buffer.committed` event. + */ +export interface InputAudioBufferCommitEvent { + /** + * The event type, must be `input_audio_buffer.commit`. + */ + type: 'input_audio_buffer.commit'; + + /** + * Optional client-generated ID used to identify this event. 
+ */ + event_id?: string; +} + +/** + * Returned when an input audio buffer is committed, either by the client or + * automatically in server VAD mode. The `item_id` property is the ID of the user + * message item that will be created, thus a `conversation.item.created` event will + * also be sent to the client. + */ +export interface InputAudioBufferCommittedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created. + */ + item_id: string; + + /** + * The ID of the preceding item after which the new item will be inserted. + */ + previous_item_id: string; + + /** + * The event type, must be `input_audio_buffer.committed`. + */ + type: 'input_audio_buffer.committed'; +} + +/** + * Sent by the server when in `server_vad` mode to indicate that speech has been + * detected in the audio buffer. This can happen any time audio is added to the + * buffer (unless speech is already detected). The client may want to use this + * event to interrupt audio playback or provide visual feedback to the user. + * + * The client should expect to receive a `input_audio_buffer.speech_stopped` event + * when speech stops. The `item_id` property is the ID of the user message item + * that will be created when speech stops and will also be included in the + * `input_audio_buffer.speech_stopped` event (unless the client manually commits + * the audio buffer during VAD activation). + */ +export interface InputAudioBufferSpeechStartedEvent { + /** + * Milliseconds from the start of all audio written to the buffer during the + * session when speech was first detected. This will correspond to the beginning of + * audio sent to the model, and thus includes the `prefix_padding_ms` configured in + * the Session. + */ + audio_start_ms: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created when speech stops. 
+ */ + item_id: string; + + /** + * The event type, must be `input_audio_buffer.speech_started`. + */ + type: 'input_audio_buffer.speech_started'; +} + +/** + * Returned in `server_vad` mode when the server detects the end of speech in the + * audio buffer. The server will also send an `conversation.item.created` event + * with the user message item that is created from the audio buffer. + */ +export interface InputAudioBufferSpeechStoppedEvent { + /** + * Milliseconds since the session started when speech stopped. This will correspond + * to the end of audio sent to the model, and thus includes the + * `min_silence_duration_ms` configured in the Session. + */ + audio_end_ms: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created. + */ + item_id: string; + + /** + * The event type, must be `input_audio_buffer.speech_stopped`. + */ + type: 'input_audio_buffer.speech_stopped'; +} + +/** + * Emitted at the beginning of a Response to indicate the updated rate limits. When + * a Response is created some tokens will be "reserved" for the output tokens, the + * rate limits shown here reflect that reservation, which is then adjusted + * accordingly once the Response is completed. + */ +export interface RateLimitsUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * List of rate limit information. + */ + rate_limits: Array<RateLimitsUpdatedEvent.RateLimit>; + + /** + * The event type, must be `rate_limits.updated`. + */ + type: 'rate_limits.updated'; +} + +export namespace RateLimitsUpdatedEvent { + export interface RateLimit { + /** + * The maximum allowed value for the rate limit. + */ + limit?: number; + + /** + * The name of the rate limit (`requests`, `tokens`). + */ + name?: 'requests' | 'tokens'; + + /** + * The remaining value before the limit is reached. + */ + remaining?: number; + + /** + * Seconds until the rate limit resets. 
+ */ + reset_seconds?: number; + } +} + +/** + * All events that the client can send to the Realtime API + */ +export type RealtimeClientEvent = + | SessionUpdateEvent + | InputAudioBufferAppendEvent + | InputAudioBufferCommitEvent + | InputAudioBufferClearEvent + | ConversationItemCreateEvent + | ConversationItemTruncateEvent + | ConversationItemDeleteEvent + | ResponseCreateEvent + | ResponseCancelEvent; + +/** + * The response resource. + */ +export interface RealtimeResponse { + /** + * The unique ID of the response. + */ + id?: string; + + /** + * Developer-provided string key-value pairs associated with this response. + */ + metadata?: unknown | null; + + /** + * The object type, must be `realtime.response`. + */ + object?: 'realtime.response'; + + /** + * The list of output items generated by the response. + */ + output?: Array<ConversationItem>; + + /** + * The final status of the response (`completed`, `cancelled`, `failed`, or + * `incomplete`). + */ + status?: 'completed' | 'cancelled' | 'failed' | 'incomplete'; + + /** + * Additional details about the status. + */ + status_details?: RealtimeResponseStatus; + + /** + * Usage statistics for the Response, this will correspond to billing. A Realtime + * API session will maintain a conversation context and append new Items to the + * Conversation, thus output from previous turns (text and audio tokens) will + * become the input for later turns. + */ + usage?: RealtimeResponseUsage; +} + +/** + * Additional details about the status. + */ +export interface RealtimeResponseStatus { + /** + * A description of the error that caused the response to fail, populated when the + * `status` is `failed`. + */ + error?: RealtimeResponseStatus.Error; + + /** + * The reason the Response did not complete. For a `cancelled` Response, one of + * `turn_detected` (the server VAD detected a new start of speech) or + * `client_cancelled` (the client sent a cancel event). 
For an `incomplete` + * Response, one of `max_output_tokens` or `content_filter` (the server-side safety + * filter activated and cut off the response). + */ + reason?: 'turn_detected' | 'client_cancelled' | 'max_output_tokens' | 'content_filter'; + + /** + * The type of error that caused the response to fail, corresponding with the + * `status` field (`completed`, `cancelled`, `incomplete`, `failed`). + */ + type?: 'completed' | 'cancelled' | 'incomplete' | 'failed'; +} + +export namespace RealtimeResponseStatus { + /** + * A description of the error that caused the response to fail, populated when the + * `status` is `failed`. + */ + export interface Error { + /** + * Error code, if any. + */ + code?: string; + + /** + * The type of error. + */ + type?: string; + } +} + +/** + * Usage statistics for the Response, this will correspond to billing. A Realtime + * API session will maintain a conversation context and append new Items to the + * Conversation, thus output from previous turns (text and audio tokens) will + * become the input for later turns. + */ +export interface RealtimeResponseUsage { + /** + * Details about the input tokens used in the Response. + */ + input_token_details?: RealtimeResponseUsage.InputTokenDetails; + + /** + * The number of input tokens used in the Response, including text and audio + * tokens. + */ + input_tokens?: number; + + /** + * Details about the output tokens used in the Response. + */ + output_token_details?: RealtimeResponseUsage.OutputTokenDetails; + + /** + * The number of output tokens sent in the Response, including text and audio + * tokens. + */ + output_tokens?: number; + + /** + * The total number of tokens in the Response including input and output text and + * audio tokens. + */ + total_tokens?: number; +} + +export namespace RealtimeResponseUsage { + /** + * Details about the input tokens used in the Response. + */ + export interface InputTokenDetails { + /** + * The number of audio tokens used in the Response. 
+ */ + audio_tokens?: number; + + /** + * The number of cached tokens used in the Response. + */ + cached_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } + + /** + * Details about the output tokens used in the Response. + */ + export interface OutputTokenDetails { + /** + * The number of audio tokens used in the Response. + */ + audio_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } +} + +/** + * All events that the Realtime API can send back + */ +export type RealtimeServerEvent = + | ErrorEvent + | SessionCreatedEvent + | SessionUpdatedEvent + | ConversationCreatedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferClearedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | ConversationItemCreatedEvent + | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionFailedEvent + | ConversationItemTruncatedEvent + | ConversationItemDeletedEvent + | ResponseCreatedEvent + | ResponseDoneEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | RateLimitsUpdatedEvent; + +/** + * Returned when the model-generated audio is updated. + */ +export interface ResponseAudioDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * Base64-encoded audio data delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. 
+ */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Returned when the model-generated audio is done. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseAudioDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Returned when the model-generated transcription of audio output is updated. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The transcript delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio_transcript.delta`. + */ + type: 'response.audio_transcript.delta'; +} + +/** + * Returned when the model-generated transcription of audio output is done + * streaming. Also emitted when a Response is interrupted, incomplete, or + * cancelled. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The index of the content part in the item's content array. 
+ */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final transcript of the audio. + */ + transcript: string; + + /** + * The event type, must be `response.audio_transcript.done`. + */ + type: 'response.audio_transcript.done'; +} + +/** + * Send this event to cancel an in-progress response. The server will respond with + * a `response.cancelled` event or an error if there is no response to cancel. + */ +export interface ResponseCancelEvent { + /** + * The event type, must be `response.cancel`. + */ + type: 'response.cancel'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * A specific response ID to cancel - if not provided, will cancel an in-progress + * response in the default conversation. + */ + response_id?: string; +} + +/** + * Returned when a new content part is added to an assistant message item during + * response generation. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item to which the content part was added. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseContentPartAddedEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +export namespace ResponseContentPartAddedEvent { + /** + * The content part that was added. 
+ */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * Returned when a content part is done streaming in an assistant message item. + * Also emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseContentPartDoneEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +export namespace ResponseContentPartDoneEvent { + /** + * The content part that is done. + */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * This event instructs the server to create a Response, which means triggering + * model inference. When in Server VAD mode, the server will create Responses + * automatically. + * + * A Response will include at least one Item, and may have two, in which case the + * second will be a function call. These Items will be appended to the conversation + * history. 
+ * + * The server will respond with a `response.created` event, events for Items and + * content created, and finally a `response.done` event to indicate the Response is + * complete. + * + * The `response.create` event includes inference configuration like + * `instructions`, and `temperature`. These fields will override the Session's + * configuration for this Response only. + */ +export interface ResponseCreateEvent { + /** + * The event type, must be `response.create`. + */ + type: 'response.create'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * Create a new Realtime response with these parameters + */ + response?: ResponseCreateEvent.Response; +} + +export namespace ResponseCreateEvent { + /** + * Create a new Realtime response with these parameters + */ + export interface Response { + /** + * Controls which conversation the response is added to. Currently supports `auto` + * and `none`, with `auto` as the default value. The `auto` value means that the + * contents of the response will be added to the default conversation. Set this to + * `none` to create an out-of-band response which will not add items to default + * conversation. + */ + conversation?: (string & {}) | 'auto' | 'none'; + + /** + * Input items to include in the prompt for the model. Creates a new context for + * this response, without including the default conversation. Can include + * references to items from the default conversation. + */ + input?: Array<ConversationItem>; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). 
The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maximum of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function, like `{"type": "function", "function": {"name": "my_function"}}`. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array<Response.Tool>; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`. 
+ */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + } + + export namespace Response { + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + } +} + +/** + * Returned when a new Response is created. The first event of response creation, + * where the response is in an initial state of `in_progress`. + */ +export interface ResponseCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The response resource. + */ + response: RealtimeResponse; + + /** + * The event type, must be `response.created`. + */ + type: 'response.created'; +} + +/** + * Returned when a Response is done streaming. Always emitted, no matter the final + * state. The Response object included in the `response.done` event will include + * all output Items in the Response but will omit the raw audio data. + */ +export interface ResponseDoneEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The response resource. + */ + response: RealtimeResponse; + + /** + * The event type, must be `response.done`. + */ + type: 'response.done'; +} + +/** + * Returned when the model-generated function call arguments are updated. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The ID of the function call. + */ + call_id: string; + + /** + * The arguments delta as a JSON string. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the function call item. 
+ */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Returned when the model-generated function call arguments are done streaming. + * Also emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The final arguments as a JSON string. + */ + arguments: string; + + /** + * The ID of the function call. + */ + call_id: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the function call item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.function_call_arguments.done`. + */ + type: 'response.function_call_arguments.done'; +} + +/** + * Returned when a new Item is created during Response generation. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.added`. + */ + type: 'response.output_item.added'; +} + +/** + * Returned when an Item is done streaming. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. 
+ */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.done`. + */ + type: 'response.output_item.done'; +} + +/** + * Returned when the text value of a "text" content part is updated. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The text delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.text.delta`. + */ + type: 'response.text.delta'; +} + +/** + * Returned when the text value of a "text" content part is done streaming. Also + * emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final text content. + */ + text: string; + + /** + * The event type, must be `response.text.done`. + */ + type: 'response.text.done'; +} + +/** + * Returned when a Session is created. Emitted automatically when a new connection + * is established as the first server event. This event will contain the default + * Session configuration. + */ +export interface SessionCreatedEvent { + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * Realtime session object configuration. + */ + session: SessionsAPI.Session; + + /** + * The event type, must be `session.created`. + */ + type: 'session.created'; +} + +/** + * Send this event to update the session’s default configuration. The client may + * send this event at any time to update the session configuration, and any field + * may be updated at any time, except for "voice". The server will respond with a + * `session.updated` event that shows the full effective configuration. Only fields + * that are present are updated, thus the correct way to clear a field like + * "instructions" is to pass an empty string. + */ +export interface SessionUpdateEvent { + /** + * Realtime session object configuration. + */ + session: SessionUpdateEvent.Session; + + /** + * The event type, must be `session.update`. + */ + type: 'session.update'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +export namespace SessionUpdateEvent { + /** + * Realtime session object configuration. + */ + export interface Session { + /** + * The Realtime model used for this session. + */ + model: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. 
+ */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array<Session.Tool>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. 
Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: Session.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + } + + export namespace Session { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when VAD is enabled. `true` + * by default. 
+ */ + create_response?: boolean; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } + } +} + +/** + * Returned when a session is updated with a `session.update` event, unless there + * is an error. + */ +export interface SessionUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * Realtime session object configuration. + */ + session: SessionsAPI.Session; + + /** + * The event type, must be `session.updated`. + */ + type: 'session.updated'; +} + +Realtime.Sessions = Sessions; + +export declare namespace Realtime { + export { + Sessions as Sessions, + type SessionsAPISession as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; +} diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts new file mode 100644 index 000000000..c1082d236 --- /dev/null +++ b/src/resources/beta/realtime/sessions.ts @@ -0,0 +1,546 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class Sessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API. 
Can be configured with the same session parameters as the + * `session.update` client event. + * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + */ + create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/realtime/sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +} + +/** + * Realtime session object configuration. + */ +export interface Session { + /** + * Unique identifier for the session object. + */ + id?: string; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. 
+ * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The Realtime model used for this session. + */ + model?: + | (string & {}) + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: Session.TurnDetection | null; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace Session { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: 'server_vad'; + } +} + +/** + * A new Realtime session configuration, with an ephermeral key. Default TTL for + * keys is one minute. + */ +export interface SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + client_secret?: SessionCreateResponse.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: SessionCreateResponse.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. 
Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: string; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: SessionCreateResponse.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. + */ + expires_at?: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value?: string; + } + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. 
Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface SessionCreateParams { + /** + * The Realtime model used for this session. 
+ */ + model: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: SessionCreateParams.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. 
To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: SessionCreateParams.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace SessionCreateParams { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). 
+ */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when VAD is enabled. `true` + * by default. + */ + create_response?: boolean; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export declare namespace Sessions { + export { + type Session as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; +} diff --git a/tests/api-resources/beta/realtime/sessions.test.ts b/tests/api-resources/beta/realtime/sessions.test.ts new file mode 100644 index 000000000..0ed998c27 --- /dev/null +++ b/tests/api-resources/beta/realtime/sessions.test.ts @@ -0,0 +1,45 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource sessions', () => { + test('create: only required params', async () => { + const responsePromise = client.beta.realtime.sessions.create({ model: 'gpt-4o-realtime-preview' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.beta.realtime.sessions.create({ + model: 'gpt-4o-realtime-preview', + input_audio_format: 'pcm16', + input_audio_transcription: { model: 'model' }, + instructions: 'instructions', + max_response_output_tokens: 0, + modalities: ['text'], + output_audio_format: 'pcm16', + temperature: 0, + tool_choice: 'tool_choice', + tools: [{ description: 'description', name: 'name', parameters: {}, type: 'function' }], + turn_detection: { + create_response: true, + prefix_padding_ms: 0, + silence_duration_ms: 0, + threshold: 0, + type: 'type', + }, + voice: 'alloy', + }); + }); +}); From 66c9715482827f7f28f5b6b8592185ae338b5379 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Jan 2025 05:07:17 +0000 Subject: [PATCH 078/246] release: 4.78.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e66c326a9..9785f7c4a 100644 --- a/.release-please-manifest.json 
+++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.4" + ".": "4.78.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a811f188..fbc82e722 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.78.0 (2025-01-09) + +Full Changelog: [v4.77.4...v4.78.0](https://github.com/openai/openai-node/compare/v4.77.4...v4.78.0) + +### Features + +* **client:** add realtime types ([#1254](https://github.com/openai/openai-node/issues/1254)) ([7130995](https://github.com/openai/openai-node/commit/71309957a9a0883cac84b8b57697b796a9df3503)) + ## 4.77.4 (2025-01-08) Full Changelog: [v4.77.3...v4.77.4](https://github.com/openai/openai-node/compare/v4.77.3...v4.77.4) diff --git a/jsr.json b/jsr.json index da442da31..e26f2d5d8 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.4", + "version": "4.78.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 453859b6b..ab06be9cf 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.4", + "version": "4.78.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7f6adc9bc..7ab855b86 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.4'; // x-release-please-version +export const VERSION = '4.78.0'; // x-release-please-version From 6070d964f6d62789f7deb670daa49f3c4f0a6f40 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 15:56:22 +0000 Subject: [PATCH 079/246] fix: send correct Accept header for certain endpoints (#1257) --- src/resources/audio/speech.ts | 7 ++++++- src/resources/files.ts | 11 ++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 
1cda80f79..bd2ed9f65 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -9,7 +9,12 @@ export class Speech extends APIResource { * Generates audio from the input text. */ create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/audio/speech', { body, ...options, __binaryResponse: true }); + return this._client.post('/audio/speech', { + body, + ...options, + headers: { Accept: 'application/octet-stream', ...options?.headers }, + __binaryResponse: true, + }); } } diff --git a/src/resources/files.ts b/src/resources/files.ts index 42a7bdfba..43708310b 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -69,7 +69,11 @@ export class Files extends APIResource { * Returns the contents of the specified file. */ content(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/files/${fileId}/content`, { ...options, __binaryResponse: true }); + return this._client.get(`/files/${fileId}/content`, { + ...options, + headers: { Accept: 'application/binary', ...options?.headers }, + __binaryResponse: true, + }); } /** @@ -78,10 +82,7 @@ export class Files extends APIResource { * @deprecated The `.content()` method should be used instead */ retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/files/${fileId}/content`, { - ...options, - headers: { Accept: 'application/json', ...options?.headers }, - }); + return this._client.get(`/files/${fileId}/content`, options); } /** From 14784f95797d4d525dafecfd4ec9c7a133540da0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 15:56:57 +0000 Subject: [PATCH 080/246] release: 4.78.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git 
a/.release-please-manifest.json b/.release-please-manifest.json index 9785f7c4a..3218ab333 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.78.0" + ".": "4.78.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index fbc82e722..320d00140 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.78.1 (2025-01-10) + +Full Changelog: [v4.78.0...v4.78.1](https://github.com/openai/openai-node/compare/v4.78.0...v4.78.1) + +### Bug Fixes + +* send correct Accept header for certain endpoints ([#1257](https://github.com/openai/openai-node/issues/1257)) ([8756693](https://github.com/openai/openai-node/commit/8756693c5690b16045cdd8d33636fe7643d45f3a)) + ## 4.78.0 (2025-01-09) Full Changelog: [v4.77.4...v4.78.0](https://github.com/openai/openai-node/compare/v4.77.4...v4.78.0) diff --git a/jsr.json b/jsr.json index e26f2d5d8..257faa02d 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.78.0", + "version": "4.78.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index ab06be9cf..ff6ec16bc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.78.0", + "version": "4.78.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7ab855b86..a8ac58ba2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.78.0'; // x-release-please-version +export const VERSION = '4.78.1'; // x-release-please-version From b08a846a9aae3686574527fa2a8d91bb0e6c7aaf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:19:51 +0000 Subject: [PATCH 081/246] chore(internal): streaming refactors (#1261) --- src/streaming.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) 
diff --git a/src/streaming.ts b/src/streaming.ts index 2891e6ac3..da633f7fd 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -38,9 +38,7 @@ export class Stream implements AsyncIterable { if (sse.data.startsWith('[DONE]')) { done = true; continue; - } - - if (sse.event === null) { + } else { let data; try { From 55f084dfcae4229075ad7ebc33fff2ef4cd095e5 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 16:28:31 +0000 Subject: [PATCH 082/246] chore: fix streaming --- src/streaming.ts | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index da633f7fd..9cfd18176 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -52,22 +52,12 @@ export class Stream implements AsyncIterable { if (data && data.error) { throw new APIError(undefined, data.error, undefined, undefined); } - - yield data; - } else { - let data; - try { - data = JSON.parse(sse.data); - } catch (e) { - console.error(`Could not parse message into JSON:`, sse.data); - console.error(`From chunk:`, sse.raw); - throw e; - } // TODO: Is this where the error should be thrown? 
if (sse.event == 'error') { throw new APIError(undefined, data.error, data.message, undefined); } - yield { event: sse.event, data: data } as any; + + yield data; } } done = true; From 620ecd506fbf379018cf8f7a7fe92253ac49c9af Mon Sep 17 00:00:00 2001 From: Minh-Anh Phan <111523473+minhanh-phan@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:42:15 -0800 Subject: [PATCH 083/246] fix(logs/azure): redact sensitive header when DEBUG is set (#1218) --- src/core.ts | 36 ++++++++++++++++- tests/index.test.ts | 94 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 128 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 972cceaec..3d2d029a5 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1148,9 +1148,43 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void { } } +const SENSITIVE_HEADERS = new Set(['authorization', 'api-key']); + export function debug(action: string, ...args: any[]) { if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') { - console.log(`OpenAI:DEBUG:${action}`, ...args); + const modifiedArgs = args.map((arg) => { + if (!arg) { + return arg; + } + + // Check for sensitive headers in request body 'headers' object + if (arg['headers']) { + // clone so we don't mutate + const modifiedArg = { ...arg, headers: { ...arg['headers'] } }; + + for (const header in arg['headers']) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + modifiedArg['headers'][header] = 'REDACTED'; + } + } + + return modifiedArg; + } + + let modifiedArg = null; + + // Check for sensitive headers in headers object + for (const header in arg) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + // avoid making a copy until we need to + modifiedArg ??= { ...arg }; + modifiedArg[header] = 'REDACTED'; + } + } + + return modifiedArg ?? 
arg; + }); + console.log(`OpenAI:DEBUG:${action}`, ...modifiedArgs); } } diff --git a/tests/index.test.ts b/tests/index.test.ts index a6f0040a4..016d525f5 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -2,7 +2,7 @@ import OpenAI from 'openai'; import { APIUserAbortError } from 'openai'; -import { Headers } from 'openai/core'; +import { debug, Headers } from 'openai/core'; import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; describe('instantiate client', () => { @@ -424,3 +424,95 @@ describe('retries', () => { expect(count).toEqual(3); }); }); + +describe('debug()', () => { + const env = process.env; + const spy = jest.spyOn(console, 'log'); + + beforeEach(() => { + jest.resetModules(); + process.env = { ...env }; + process.env['DEBUG'] = 'true'; + }); + + afterEach(() => { + process.env = env; + }); + + test('body request object with Authorization header', function () { + // Test request body includes headers object with Authorization + const headersTest = { + headers: { + Authorization: 'fakeAuthorization', + }, + }; + debug('request', headersTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + Authorization: 'REDACTED', + }, + }); + }); + + test('body request object with api-key header', function () { + // Test request body includes headers object with api-ley + const apiKeyTest = { + headers: { + 'api-key': 'fakeKey', + }, + }; + debug('request', apiKeyTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + 'api-key': 'REDACTED', + }, + }); + }); + + test('header object with Authorization header', function () { + // Test headers object with authorization header + const authorizationTest = { + authorization: 'fakeValue', + }; + debug('request', authorizationTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input args are not mutated', function () { + const authorizationTest = { + 
authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', authorizationTest); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input headers are not mutated', function () { + const authorizationTest = { + authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', { headers: req.headers }); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); +}); From 2bc96529a32fbddc8a86c53dbd8bbb93f703e056 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 20:23:42 +0000 Subject: [PATCH 084/246] Revert "chore(internal): streaming refactors (#1261)" This reverts commit dd4af939792583854a313367c5fe2f98eea2f3c8. 
--- src/streaming.ts | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index 9cfd18176..2891e6ac3 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -38,7 +38,9 @@ export class Stream implements AsyncIterable { if (sse.data.startsWith('[DONE]')) { done = true; continue; - } else { + } + + if (sse.event === null) { let data; try { @@ -52,12 +54,22 @@ export class Stream implements AsyncIterable { if (data && data.error) { throw new APIError(undefined, data.error, undefined, undefined); } + + yield data; + } else { + let data; + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } // TODO: Is this where the error should be thrown? if (sse.event == 'error') { throw new APIError(undefined, data.error, data.message, undefined); } - - yield data; + yield { event: sse.event, data: data } as any; } } done = true; From 5df77388f6a8cfc3ac465f77825f01ceb41fa505 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:59:41 +0000 Subject: [PATCH 085/246] chore(types): rename vector store chunking strategy (#1263) --- api.md | 2 +- src/resources/beta/beta.ts | 4 ++-- src/resources/beta/index.ts | 2 +- src/resources/beta/vector-stores/index.ts | 2 +- src/resources/beta/vector-stores/vector-stores.ts | 6 +++--- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api.md b/api.md index a885628a3..33ab95ef6 100644 --- a/api.md +++ b/api.md @@ -283,7 +283,7 @@ Types: - OtherFileChunkingStrategyObject - StaticFileChunkingStrategy - StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyParam +- StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index ccd043243..df929b2f7 100644 --- 
a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -48,7 +48,7 @@ import { OtherFileChunkingStrategyObject, StaticFileChunkingStrategy, StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, + StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreCreateParams, VectorStoreDeleted, @@ -85,7 +85,7 @@ export declare namespace Beta { type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy as StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, VectorStoresPage as VectorStoresPage, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index aa2e52d4c..babca0016 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -46,7 +46,7 @@ export { type OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, type VectorStoreCreateParams, diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts index 89fc0cde0..d587bd160 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/beta/vector-stores/index.ts @@ -23,7 +23,7 @@ export { type OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, type VectorStoreCreateParams, diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 35ad8c369..cbff2d562 
100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -116,7 +116,7 @@ export type FileChunkingStrategy = StaticFileChunkingStrategyObject | OtherFileC * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ -export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyParam; +export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyObjectParam; /** * This is returned when the chunking strategy is unknown. Typically, this is @@ -154,7 +154,7 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } -export interface StaticFileChunkingStrategyParam { +export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; /** @@ -397,7 +397,7 @@ export declare namespace VectorStores { type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy as StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, VectorStoresPage as VectorStoresPage, From 66067d37a4189f838f31ed9ca06ee335aef67616 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 13:58:18 +0000 Subject: [PATCH 086/246] chore(types): add `| undefined` to client options properties (#1264) --- src/index.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/index.ts b/src/index.ts index 2320850fb..cf6aa89e3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -137,7 +137,7 @@ export interface ClientOptions { * Note that 
request timeouts are retried by default, so in a worst-case scenario you may wait * much longer than this timeout before the promise succeeds or fails. */ - timeout?: number; + timeout?: number | undefined; /** * An HTTP agent used to manage HTTP(S) connections. @@ -145,7 +145,7 @@ export interface ClientOptions { * If not provided, an agent will be constructed by default in the Node.js environment, * otherwise no agent is used. */ - httpAgent?: Agent; + httpAgent?: Agent | undefined; /** * Specify a custom `fetch` function implementation. @@ -161,7 +161,7 @@ export interface ClientOptions { * * @default 2 */ - maxRetries?: number; + maxRetries?: number | undefined; /** * Default headers to include with every request to the API. @@ -169,7 +169,7 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * header to `undefined` or `null` in request options. */ - defaultHeaders?: Core.Headers; + defaultHeaders?: Core.Headers | undefined; /** * Default query parameters to include with every request to the API. @@ -177,13 +177,13 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * param to `undefined` in request options. */ - defaultQuery?: Core.DefaultQuery; + defaultQuery?: Core.DefaultQuery | undefined; /** * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. 
*/ - dangerouslyAllowBrowser?: boolean; + dangerouslyAllowBrowser?: boolean | undefined; } /** From a796d21f06307419f352da8b9943f6745ff4084f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 16 Jan 2025 16:33:38 +0000 Subject: [PATCH 087/246] feat(client): add Realtime API support (#1266) --- README.md | 87 ++++++++++++++++++++++++++ examples/package.json | 7 ++- examples/realtime/websocket.ts | 48 +++++++++++++++ examples/realtime/ws.ts | 55 +++++++++++++++++ package.json | 6 ++ src/beta/realtime/index.ts | 1 + src/beta/realtime/internal-base.ts | 83 +++++++++++++++++++++++++ src/beta/realtime/websocket.ts | 97 +++++++++++++++++++++++++++++ src/beta/realtime/ws.ts | 69 +++++++++++++++++++++ src/lib/EventEmitter.ts | 98 ++++++++++++++++++++++++++++++ yarn.lock | 12 ++++ 11 files changed, 560 insertions(+), 3 deletions(-) create mode 100644 examples/realtime/websocket.ts create mode 100644 examples/realtime/ws.ts create mode 100644 src/beta/realtime/index.ts create mode 100644 src/beta/realtime/internal-base.ts create mode 100644 src/beta/realtime/websocket.ts create mode 100644 src/beta/realtime/ws.ts create mode 100644 src/lib/EventEmitter.ts diff --git a/README.md b/README.md index 3039857a1..e7d69a669 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,93 @@ main(); If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. +## Realtime API beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. 
Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). + +This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). + +Basic text based example with `ws`: + +```ts +// requires `yarn add ws @types/ws` +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +// access the underlying `ws.WebSocket` instance +rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); +}); + +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); + +rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); +}); + +rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); +rt.on('response.text.done', () => console.log()); + +rt.on('response.done', () => rt.close()); + +rt.socket.on('close', () => console.log('\nConnection closed!')); +``` + +To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access: + +```ts +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +// ... +rt.socket.addEventListener('open', () => { + // ... +}); +``` + +A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/web.ts). + +### Realtime error handling + +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. + +It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. 
+ +```ts +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); +``` + ### Request & Response types This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: diff --git a/examples/package.json b/examples/package.json index c8a5f7087..b8c34ac45 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,14 +6,15 @@ "license": "MIT", "private": true, "dependencies": { + "@azure/identity": "^4.2.0", "express": "^4.18.2", "next": "^14.1.1", "openai": "file:..", - "zod-to-json-schema": "^3.21.4", - "@azure/identity": "^4.2.0" + "zod-to-json-schema": "^3.21.4" }, "devDependencies": { "@types/body-parser": "^1.19.3", - "@types/express": "^4.17.19" + "@types/express": "^4.17.19", + "@types/web": "^0.0.194" } } diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts new file mode 100644 index 000000000..0da131bc3 --- /dev/null +++ b/examples/realtime/websocket.ts @@ -0,0 +1,48 @@ +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +async function main() { + const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.addEventListener('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts new file mode 100644 index 000000000..4bbe85e5d --- /dev/null +++ b/examples/realtime/ws.ts @@ -0,0 +1,55 @@ +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +async function main() { + const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['foo'] as any, + model: 'gpt-4o-realtime-preview', + }, + }); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.on('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/package.json b/package.json index ff6ec16bc..77e2d609f 100644 --- a/package.json +++ b/package.json @@ -36,6 +36,7 @@ "@swc/core": "^1.3.102", "@swc/jest": "^0.2.29", "@types/jest": "^29.4.0", + "@types/ws": "^8.5.13", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.0", "eslint": "^8.49.0", @@ -52,6 +53,7 @@ "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", "typescript": "^4.8.2", + "ws": "^8.18.0", "zod": "^3.23.8" }, "sideEffects": [ @@ -126,9 +128,13 @@ }, "bin": "./bin/cli", "peerDependencies": { + "ws": "^8.18.0", "zod": "^3.23.8" }, "peerDependenciesMeta": { + "ws": { + "optional": true + }, "zod": { "optional": true } diff --git a/src/beta/realtime/index.ts b/src/beta/realtime/index.ts new file mode 100644 index 000000000..75f0f3088 --- /dev/null +++ b/src/beta/realtime/index.ts @@ -0,0 +1 @@ +export { OpenAIRealtimeError } from './internal-base'; diff --git a/src/beta/realtime/internal-base.ts b/src/beta/realtime/internal-base.ts new file mode 100644 index 000000000..391d69911 --- /dev/null +++ b/src/beta/realtime/internal-base.ts @@ -0,0 +1,83 @@ +import { RealtimeClientEvent, RealtimeServerEvent, ErrorEvent } from '../../resources/beta/realtime/realtime'; +import { EventEmitter } from '../../lib/EventEmitter'; +import { OpenAIError } from '../../error'; + +export class OpenAIRealtimeError 
extends OpenAIError { + /** + * The error data that the API sent back in an `error` event. + */ + error?: ErrorEvent.Error | undefined; + + /** + * The unique ID of the server event. + */ + event_id?: string | undefined; + + constructor(message: string, event: ErrorEvent | null) { + super(message); + + this.error = event?.error; + this.event_id = event?.event_id; + } +} + +type Simplify = { [KeyType in keyof T]: T[KeyType] } & {}; + +type RealtimeEvents = Simplify< + { + event: (event: RealtimeServerEvent) => void; + error: (error: OpenAIRealtimeError) => void; + } & { + [EventType in Exclude]: ( + event: Extract, + ) => unknown; + } +>; + +export abstract class OpenAIRealtimeEmitter extends EventEmitter { + /** + * Send an event to the API. + */ + abstract send(event: RealtimeClientEvent): void; + + /** + * Close the websocket connection. + */ + abstract close(props?: { code: number; reason: string }): void; + + protected _onError(event: null, message: string, cause: any): void; + protected _onError(event: ErrorEvent, message?: string | undefined): void; + protected _onError(event: ErrorEvent | null, message?: string | undefined, cause?: any): void { + message = + event?.error ? + `${event.error.message} code=${event.error.code} param=${event.error.param} type=${event.error.type} event_id=${event.error.event_id}` + : message ?? 'unknown error'; + + if (!this._hasListener('error')) { + const error = new OpenAIRealtimeError( + message + + `\n\nTo resolve these unhandled rejection errors you should bind an \`error\` callback, e.g. 
\`rt.on('error', (error) => ...)\` `, + event, + ); + // @ts-ignore + error.cause = cause; + Promise.reject(error); + return; + } + + const error = new OpenAIRealtimeError(message, event); + // @ts-ignore + error.cause = cause; + + this._emit('error', error); + } +} + +export function buildRealtimeURL(props: { baseURL: string; model: string }): URL { + const path = '/realtime'; + + const url = new URL(props.baseURL + (props.baseURL.endsWith('/') ? path.slice(1) : path)); + url.protocol = 'wss'; + url.searchParams.set('model', props.model); + return url; +} diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts new file mode 100644 index 000000000..e0853779d --- /dev/null +++ b/src/beta/realtime/websocket.ts @@ -0,0 +1,97 @@ +import { OpenAI } from '../../index'; +import { OpenAIError } from '../../error'; +import * as Core from '../../core'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; + +interface MessageEvent { + data: string; +} + +type _WebSocket = + typeof globalThis extends ( + { + WebSocket: infer ws; + } + ) ? + // @ts-ignore + InstanceType + : any; + +export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { + url: URL; + socket: _WebSocket; + + constructor( + props: { + model: string; + dangerouslyAllowBrowser?: boolean; + }, + client?: Pick, + ) { + super(); + + const dangerouslyAllowBrowser = + props.dangerouslyAllowBrowser ?? + (client as any)?._options?.dangerouslyAllowBrowser ?? + (client?.apiKey.startsWith('ek_') ? 
true : null); + + if (!dangerouslyAllowBrowser && Core.isRunningInBrowser()) { + throw new OpenAIError( + "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\n\nYou can avoid this error by creating an ephemeral session token:\nhttps://platform.openai.com/docs/api-reference/realtime-sessions\n", + ); + } + + client ??= new OpenAI({ dangerouslyAllowBrowser }); + + this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + // @ts-ignore + this.socket = new WebSocket(this.url, [ + 'realtime', + `openai-insecure-api-key.${client.apiKey}`, + 'openai-beta.realtime-v1', + ]); + + this.socket.addEventListener('message', (websocketEvent: MessageEvent) => { + const event = (() => { + try { + return JSON.parse(websocketEvent.data.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.addEventListener('error', (event: any) => { + this._onError(null, event.message, null); + }); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 
'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts new file mode 100644 index 000000000..33bb11ad9 --- /dev/null +++ b/src/beta/realtime/ws.ts @@ -0,0 +1,69 @@ +import WS from 'ws'; +import { OpenAI } from '../../index'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; + +export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { + url: URL; + socket: WS.WebSocket; + + constructor( + props: { model: string; options?: WS.ClientOptions | undefined }, + client?: Pick, + ) { + super(); + client ??= new OpenAI(); + + this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.socket = new WS.WebSocket(this.url, { + ...props.options, + headers: { + ...props.options?.headers, + Authorization: `Bearer ${client.apiKey}`, + 'OpenAI-Beta': 'realtime=v1', + }, + }); + + this.socket.on('message', (wsEvent) => { + const event = (() => { + try { + return JSON.parse(wsEvent.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.on('error', (err) => { + this._onError(null, err.message, err); + }); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 
'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} diff --git a/src/lib/EventEmitter.ts b/src/lib/EventEmitter.ts new file mode 100644 index 000000000..9adeebdc3 --- /dev/null +++ b/src/lib/EventEmitter.ts @@ -0,0 +1,98 @@ +type EventListener = Events[EventType]; + +type EventListeners = Array<{ + listener: EventListener; + once?: boolean; +}>; + +export type EventParameters = { + [Event in EventType]: EventListener extends (...args: infer P) => any ? P : never; +}[EventType]; + +export class EventEmitter any>> { + #listeners: { + [Event in keyof EventTypes]?: EventListeners; + } = {}; + + /** + * Adds the listener function to the end of the listeners array for the event. + * No checks are made to see if the listener has already been added. Multiple calls passing + * the same combination of event and listener will result in the listener being added, and + * called, multiple times. + * @returns this, so that calls can be chained + */ + on(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener }); + return this; + } + + /** + * Removes the specified listener from the listener array for the event. + * off() will remove, at most, one instance of a listener from the listener array. If any single + * listener has been added multiple times to the listener array for the specified event, then + * off() must be called multiple times to remove each instance. + * @returns this, so that calls can be chained + */ + off(event: Event, listener: EventListener): this { + const listeners = this.#listeners[event]; + if (!listeners) return this; + const index = listeners.findIndex((l) => l.listener === listener); + if (index >= 0) listeners.splice(index, 1); + return this; + } + + /** + * Adds a one-time listener function for the event. The next time the event is triggered, + * this listener is removed and then invoked. 
+ * @returns this, so that calls can be chained + */ + once(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener, once: true }); + return this; + } + + /** + * This is similar to `.once()`, but returns a Promise that resolves the next time + * the event is triggered, instead of calling a listener callback. + * @returns a Promise that resolves the next time given event is triggered, + * or rejects if an error is emitted. (If you request the 'error' event, + * returns a promise that resolves with the error). + * + * Example: + * + * const message = await stream.emitted('message') // rejects if the stream errors + */ + emitted( + event: Event, + ): Promise< + EventParameters extends [infer Param] ? Param + : EventParameters extends [] ? void + : EventParameters + > { + return new Promise((resolve, reject) => { + // TODO: handle errors + this.once(event, resolve as any); + }); + } + + protected _emit( + this: EventEmitter, + event: Event, + ...args: EventParameters + ) { + const listeners: EventListeners | undefined = this.#listeners[event]; + if (listeners) { + this.#listeners[event] = listeners.filter((l) => !l.once) as any; + listeners.forEach(({ listener }: any) => listener(...(args as any))); + } + } + + protected _hasListener(event: keyof EventTypes): boolean { + const listeners = this.#listeners[event]; + return listeners && listeners.length > 0; + } +} diff --git a/yarn.lock b/yarn.lock index c0220f984..0a4307f70 100644 --- a/yarn.lock +++ b/yarn.lock @@ -881,6 +881,13 @@ resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw== +"@types/ws@^8.5.13": + version "8.5.13" + resolved 
"https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" + integrity sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA== + dependencies: + "@types/node" "*" + "@types/yargs-parser@*": version "21.0.3" resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" @@ -3472,6 +3479,11 @@ write-file-atomic@^4.0.2: imurmurhash "^0.1.4" signal-exit "^3.0.7" +ws@^8.18.0: + version "8.18.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" + integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + y18n@^5.0.5: version "5.0.8" resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" From 9d214eac82509028787b6ad148fec46689af74d3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 05:06:39 +0000 Subject: [PATCH 088/246] release: 4.79.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3218ab333..a4062b378 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.78.1" + ".": "4.79.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 320d00140..c2021f78a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 4.79.0 (2025-01-17) + +Full Changelog: [v4.78.1...v4.79.0](https://github.com/openai/openai-node/compare/v4.78.1...v4.79.0) + +### Features + +* **client:** add Realtime API support ([#1266](https://github.com/openai/openai-node/issues/1266)) 
([7160ebe](https://github.com/openai/openai-node/commit/7160ebe647769fbf48a600c9961d1a6f86dc9622)) + + +### Bug Fixes + +* **logs/azure:** redact sensitive header when DEBUG is set ([#1218](https://github.com/openai/openai-node/issues/1218)) ([6a72fd7](https://github.com/openai/openai-node/commit/6a72fd736733db19504a829bf203b39d5b9e3644)) + + +### Chores + +* fix streaming ([379c743](https://github.com/openai/openai-node/commit/379c7435ed5d508458e9cdc22386039b84fcec5e)) +* **internal:** streaming refactors ([#1261](https://github.com/openai/openai-node/issues/1261)) ([dd4af93](https://github.com/openai/openai-node/commit/dd4af939792583854a313367c5fe2f98eea2f3c8)) +* **types:** add `| undefined` to client options properties ([#1264](https://github.com/openai/openai-node/issues/1264)) ([5e56979](https://github.com/openai/openai-node/commit/5e569799b9ac8f915b16de90d91d38b568c1edce)) +* **types:** rename vector store chunking strategy ([#1263](https://github.com/openai/openai-node/issues/1263)) ([d31acee](https://github.com/openai/openai-node/commit/d31acee860c80ba945d4e70b956c7ed75f5f849a)) + ## 4.78.1 (2025-01-10) Full Changelog: [v4.78.0...v4.78.1](https://github.com/openai/openai-node/compare/v4.78.0...v4.78.1) diff --git a/jsr.json b/jsr.json index 257faa02d..ac02a7435 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.78.1", + "version": "4.79.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 77e2d609f..3b01be9fe 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.78.1", + "version": "4.79.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index a8ac58ba2..afc5d7104 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.78.1'; // x-release-please-version +export const VERSION 
= '4.79.0'; // x-release-please-version From 6cd83178324271763c3b3ba236ea5406c1447dd4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 17 Jan 2025 19:39:08 +0000 Subject: [PATCH 089/246] fix(realtime): correct import syntax (#1267) --- src/beta/realtime/ws.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 33bb11ad9..631a36cd2 100644 --- a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -1,4 +1,4 @@ -import WS from 'ws'; +import * as WS from 'ws'; import { OpenAI } from '../../index'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; From 8383975a2e45aa222fcf56a45b38834bcf8b31c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:39:37 +0000 Subject: [PATCH 090/246] release: 4.79.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a4062b378..8d95306a8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.0" + ".": "4.79.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c2021f78a..d24eeffa5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.79.1 (2025-01-17) + +Full Changelog: [v4.79.0...v4.79.1](https://github.com/openai/openai-node/compare/v4.79.0...v4.79.1) + +### Bug Fixes + +* **realtime:** correct import syntax ([#1267](https://github.com/openai/openai-node/issues/1267)) ([74702a7](https://github.com/openai/openai-node/commit/74702a739f566810d2b6c4e0832cfa17a1d1e272)) + ## 4.79.0 (2025-01-17) Full Changelog: 
[v4.78.1...v4.79.0](https://github.com/openai/openai-node/compare/v4.78.1...v4.79.0) diff --git a/jsr.json b/jsr.json index ac02a7435..9f4dbe4b6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.0", + "version": "4.79.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 3b01be9fe..2984cf2d8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.0", + "version": "4.79.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index afc5d7104..587a3c245 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.0'; // x-release-please-version +export const VERSION = '4.79.1'; // x-release-please-version From 6f3ad43ac1bbb8f8f6c8fae9e83398d85cead56c Mon Sep 17 00:00:00 2001 From: Kevin Whinnery Date: Fri, 17 Jan 2025 15:12:04 -0600 Subject: [PATCH 091/246] Create export for WebSocket on Deno/JSR --- jsr.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/jsr.json b/jsr.json index 9f4dbe4b6..72575a407 100644 --- a/jsr.json +++ b/jsr.json @@ -1,7 +1,10 @@ { "name": "@openai/openai", "version": "4.79.1", - "exports": "./index.ts", + "exports": { + ".": "./index.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, "publish": { "exclude": [ "!." 
From 4640dc608f7f55624656007207c49feb5f3047e3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 17:59:02 +0000 Subject: [PATCH 092/246] chore(internal): add test (#1270) --- tests/index.test.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/index.test.ts b/tests/index.test.ts index 016d525f5..6227d6fbe 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -96,6 +96,15 @@ describe('instantiate client', () => { expect(response).toEqual({ url: 'http://localhost:5000/foo', custom: true }); }); + test('explicit global fetch', async () => { + // make sure the global fetch type is assignable to our Fetch type + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + apiKey: 'My API Key', + fetch: defaultFetch, + }); + }); + test('custom signal', async () => { const client = new OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', From 95886b57b1373c16e12a0ee1288d68cd8520695d Mon Sep 17 00:00:00 2001 From: Ali Tabesh Date: Tue, 21 Jan 2025 13:19:22 +0330 Subject: [PATCH 093/246] docs(readme): fix Realtime API example link (#1272) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e7d69a669..3bd386e99 100644 --- a/README.md +++ b/README.md @@ -153,7 +153,7 @@ rt.socket.addEventListener('open', () => { }); ``` -A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/web.ts). +A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts). 
### Realtime error handling From 53149de69e19836568c1f1083ee7ee3c07123d1a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 09:49:52 +0000 Subject: [PATCH 094/246] release: 4.79.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8d95306a8..06a612d67 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.1" + ".": "4.79.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d24eeffa5..9151619f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.79.2 (2025-01-21) + +Full Changelog: [v4.79.1...v4.79.2](https://github.com/openai/openai-node/compare/v4.79.1...v4.79.2) + +### Chores + +* **internal:** add test ([#1270](https://github.com/openai/openai-node/issues/1270)) ([b7c2d3d](https://github.com/openai/openai-node/commit/b7c2d3d9abd315f1452a578b0fd0d82e6ac4ff60)) + + +### Documentation + +* **readme:** fix Realtime API example link ([#1272](https://github.com/openai/openai-node/issues/1272)) ([d0653c7](https://github.com/openai/openai-node/commit/d0653c7fef48360d137a7411dfdfb95d477cdbc5)) + ## 4.79.1 (2025-01-17) Full Changelog: [v4.79.0...v4.79.1](https://github.com/openai/openai-node/compare/v4.79.0...v4.79.1) diff --git a/jsr.json b/jsr.json index 9f4dbe4b6..ce967d67a 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.1", + "version": "4.79.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 2984cf2d8..07b2da77d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.1", + "version": "4.79.2", "description": "The official TypeScript library for the OpenAI 
API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 587a3c245..2cedb894b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.1'; // x-release-please-version +export const VERSION = '4.79.2'; // x-release-please-version From e5e682f11783b14323f03ff9bf3298b8c6868136 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 21 Jan 2025 15:35:13 +0000 Subject: [PATCH 095/246] fix(jsr): export zod helpers --- jsr.json | 1 + 1 file changed, 1 insertion(+) diff --git a/jsr.json b/jsr.json index 35ee4e7ea..5819f2fa3 100644 --- a/jsr.json +++ b/jsr.json @@ -3,6 +3,7 @@ "version": "4.79.2", "exports": { ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", "./beta/realtime/websocket": "./beta/realtime/websocket.ts" }, "publish": { From f5139d4aa281bd9a20b8cf5c801843f4d6c4bb3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 15:53:54 +0000 Subject: [PATCH 096/246] release: 4.79.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 06a612d67..cdd63a113 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.2" + ".": "4.79.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9151619f9..8a1ce156f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.79.3 (2025-01-21) + +Full Changelog: [v4.79.2...v4.79.3](https://github.com/openai/openai-node/compare/v4.79.2...v4.79.3) + +### Bug Fixes + +* **jsr:** export zod helpers ([9dc55b6](https://github.com/openai/openai-node/commit/9dc55b62b564ad5ad1d4a60fe520b68235d05296)) + ## 4.79.2 (2025-01-21) Full Changelog: 
[v4.79.1...v4.79.2](https://github.com/openai/openai-node/compare/v4.79.1...v4.79.2) diff --git a/jsr.json b/jsr.json index 5819f2fa3..c070e4983 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.2", + "version": "4.79.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 07b2da77d..342f7c539 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.2", + "version": "4.79.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 2cedb894b..c2097ae42 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.2'; // x-release-please-version +export const VERSION = '4.79.3'; // x-release-please-version From a1d0ddc3b27b15700e355a476e8d183dae43987c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 17:22:40 +0000 Subject: [PATCH 097/246] docs: update deprecation messages (#1275) --- src/resources/chat/completions.ts | 24 ++++++++++++------------ src/resources/files.ts | 4 ++-- src/resources/fine-tuning/jobs/jobs.ts | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 31f5814cb..88c778036 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -163,8 +163,8 @@ export interface ChatCompletionAssistantMessageParam { content?: string | Array | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. 
*/ function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null; @@ -198,8 +198,8 @@ export namespace ChatCompletionAssistantMessageParam { } /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -360,8 +360,8 @@ export namespace ChatCompletionChunk { content?: string | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ function_call?: Delta.FunctionCall; @@ -380,8 +380,8 @@ export namespace ChatCompletionChunk { export namespace Delta { /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -620,8 +620,8 @@ export interface ChatCompletionMessage { audio?: ChatCompletionAudio | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ function_call?: ChatCompletionMessage.FunctionCall | null; @@ -633,8 +633,8 @@ export interface ChatCompletionMessage { export namespace ChatCompletionMessage { /** - * @deprecated: Deprecated and replaced by `tool_calls`. 
The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** diff --git a/src/resources/files.ts b/src/resources/files.ts index 43708310b..67bc95469 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -168,13 +168,13 @@ export interface FileObject { | 'vision'; /** - * @deprecated: Deprecated. The current status of the file, which can be either + * @deprecated Deprecated. The current status of the file, which can be either * `uploaded`, `processed`, or `error`. */ status: 'uploaded' | 'processed' | 'error'; /** - * @deprecated: Deprecated. For details on why a fine-tuning training file failed + * @deprecated Deprecated. For details on why a fine-tuning training file failed * validation, see the `error` field on `fine_tuning.job`. */ status_details?: string; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 44dd011aa..9be03c302 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -516,7 +516,7 @@ export interface JobCreateParams { export namespace JobCreateParams { /** - * @deprecated: The hyperparameters used for the fine-tuning job. This value is now + * @deprecated The hyperparameters used for the fine-tuning job. This value is now * deprecated in favor of `method`, and should be passed in under the `method` * parameter. 
*/ From c85dc9793ab6fb318b9ece1a557c4e00024265c1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 21:00:49 +0000 Subject: [PATCH 098/246] chore(internal): minor restructuring (#1278) --- src/internal/decoders/line.ts | 2 +- src/internal/stream-utils.ts | 32 +++++++++++++++++++++++++++++ src/streaming.ts | 38 +++-------------------------------- 3 files changed, 36 insertions(+), 36 deletions(-) create mode 100644 src/internal/stream-utils.ts diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 1e0bbf390..34e41d1dc 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -1,6 +1,6 @@ import { OpenAIError } from '../../error'; -type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; +export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; /** * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally diff --git a/src/internal/stream-utils.ts b/src/internal/stream-utils.ts new file mode 100644 index 000000000..37f7793cf --- /dev/null +++ b/src/internal/stream-utils.ts @@ -0,0 +1,32 @@ +/** + * Most browsers don't yet have async iterable support for ReadableStream, + * and Node has a very different way of reading bytes from its "ReadableStream". 
+ * + * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 + */ +export function ReadableStreamToAsyncIterable(stream: any): AsyncIterableIterator { + if (stream[Symbol.asyncIterator]) return stream; + + const reader = stream.getReader(); + return { + async next() { + try { + const result = await reader.read(); + if (result?.done) reader.releaseLock(); // release lock when stream becomes closed + return result; + } catch (e) { + reader.releaseLock(); // release lock when stream becomes errored + throw e; + } + }, + async return() { + const cancelPromise = reader.cancel(); + reader.releaseLock(); + await cancelPromise; + return { done: true, value: undefined }; + }, + [Symbol.asyncIterator]() { + return this; + }, + }; +} diff --git a/src/streaming.ts b/src/streaming.ts index 2891e6ac3..6a57a50a0 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,6 +1,7 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; import { LineDecoder } from './internal/decoders/line'; +import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; import { APIError } from './error'; @@ -96,7 +97,7 @@ export class Stream implements AsyncIterable { async function* iterLines(): AsyncGenerator { const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(readableStream); + const iter = ReadableStreamToAsyncIterable(readableStream); for await (const chunk of iter) { for (const line of lineDecoder.decode(chunk)) { yield line; @@ -210,7 +211,7 @@ export async function* _iterSSEMessages( const sseDecoder = new SSEDecoder(); const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(response.body); + const iter = ReadableStreamToAsyncIterable(response.body); for await (const sseChunk of iterSSEChunks(iter)) { for (const line of lineDecoder.decode(sseChunk)) { const sse = sseDecoder.decode(line); @@ -363,36 +364,3 @@ 
function partition(str: string, delimiter: string): [string, string, string] { return [str, '', '']; } - -/** - * Most browsers don't yet have async iterable support for ReadableStream, - * and Node has a very different way of reading bytes from its "ReadableStream". - * - * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 - */ -export function readableStreamAsyncIterable(stream: any): AsyncIterableIterator { - if (stream[Symbol.asyncIterator]) return stream; - - const reader = stream.getReader(); - return { - async next() { - try { - const result = await reader.read(); - if (result?.done) reader.releaseLock(); // release lock when stream becomes closed - return result; - } catch (e) { - reader.releaseLock(); // release lock when stream becomes errored - throw e; - } - }, - async return() { - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; - return { done: true, value: undefined }; - }, - [Symbol.asyncIterator]() { - return this; - }, - }; -} From e5aba740d98541e9ca7cb01998c27033c0f03c5f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 21 Jan 2025 22:36:52 +0000 Subject: [PATCH 099/246] fix(jsr): correct zod config --- jsr.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/jsr.json b/jsr.json index c070e4983..8c24896f7 100644 --- a/jsr.json +++ b/jsr.json @@ -6,6 +6,9 @@ "./helpers/zod": "./helpers/zod.ts", "./beta/realtime/websocket": "./beta/realtime/websocket.ts" }, + "imports": { + "zod": "npm:zod@3" + }, "publish": { "exclude": [ "!." 
From 0fae08b33e6963c6b46e6318f23bada01d18f19f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 22:37:23 +0000 Subject: [PATCH 100/246] release: 4.79.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index cdd63a113..b1ab5c7b9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.3" + ".": "4.79.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a1ce156f..4254a9b8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.79.4 (2025-01-21) + +Full Changelog: [v4.79.3...v4.79.4](https://github.com/openai/openai-node/compare/v4.79.3...v4.79.4) + +### Bug Fixes + +* **jsr:** correct zod config ([e45fa5f](https://github.com/openai/openai-node/commit/e45fa5f535ca74789636001e60e33edcad4db83c)) + + +### Chores + +* **internal:** minor restructuring ([#1278](https://github.com/openai/openai-node/issues/1278)) ([58ea92a](https://github.com/openai/openai-node/commit/58ea92a7464a04223f24ba31dbc0f7d0cf99cc19)) + + +### Documentation + +* update deprecation messages ([#1275](https://github.com/openai/openai-node/issues/1275)) ([1c6599e](https://github.com/openai/openai-node/commit/1c6599e47ef75a71cb309a1e14d97bc97bd036d0)) + ## 4.79.3 (2025-01-21) Full Changelog: [v4.79.2...v4.79.3](https://github.com/openai/openai-node/compare/v4.79.2...v4.79.3) diff --git a/jsr.json b/jsr.json index 8c24896f7..e6d772116 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.3", + "version": "4.79.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 342f7c539..d7a5555e5 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "openai", - "version": "4.79.3", + "version": "4.79.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c2097ae42..e8b9601ed 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.3'; // x-release-please-version +export const VERSION = '4.79.4'; // x-release-please-version From 74776c6923b36b8b610063e0f5d8773bbd94313f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:20:44 +0000 Subject: [PATCH 101/246] feat(api): update enum values, comments, and examples (#1280) --- .stats.yml | 2 +- src/resources/audio/speech.ts | 8 ++--- src/resources/beta/realtime/realtime.ts | 32 +++++++++++-------- src/resources/beta/realtime/sessions.ts | 30 ++++++++++------- src/resources/chat/completions.ts | 9 ++---- src/resources/embeddings.ts | 3 +- .../beta/realtime/sessions.test.ts | 27 ++-------------- tests/api-resources/chat/completions.test.ts | 2 +- tests/api-resources/completions.test.ts | 2 +- 9 files changed, 49 insertions(+), 66 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9600edae3..d518bac58 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index bd2ed9f65..35e82c4c1 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -33,12 +33,12 @@ export interface SpeechCreateParams { model: (string & {}) | SpeechModel; /** - * The voice to use when generating the audio. 
Supported voices are `alloy`, - * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - * available in the + * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + * `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + * voices are available in the * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). */ - voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; + voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; /** * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5de06917a..0fb66eb49 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -173,9 +173,10 @@ export interface ConversationItemCreateEvent { /** * The ID of the preceding item after which the new item will be inserted. If not - * set, the new item will be appended to the end of the conversation. If set, it - * allows an item to be inserted mid-conversation. If the ID cannot be found, an - * error will be returned and the item will not be added. + * set, the new item will be appended to the end of the conversation. If set to + * `root`, the new item will be added to the beginning of the conversation. If set + * to an existing ID, it allows an item to be inserted mid-conversation. If the ID + * cannot be found, an error will be returned and the item will not be added. */ previous_item_id?: string; } @@ -1705,17 +1706,9 @@ export namespace SessionUpdateEvent { */ export interface Session { /** - * The Realtime model used for this session. 
- */ - model: - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' - | 'gpt-4o-realtime-preview-2024-12-17' - | 'gpt-4o-mini-realtime-preview' - | 'gpt-4o-mini-realtime-preview-2024-12-17'; - - /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -1756,8 +1749,19 @@ export namespace SessionUpdateEvent { */ modalities?: Array<'text' | 'audio'>; + /** + * The Realtime model used for this session. + */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. */ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index c1082d236..68c48db59 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -32,7 +32,9 @@ export interface Session { id?: string; /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -86,6 +88,7 @@ export interface Session { /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. 
*/ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -372,17 +375,9 @@ export namespace SessionCreateResponse { export interface SessionCreateParams { /** - * The Realtime model used for this session. - */ - model: - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' - | 'gpt-4o-realtime-preview-2024-12-17' - | 'gpt-4o-mini-realtime-preview' - | 'gpt-4o-mini-realtime-preview-2024-12-17'; - - /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -423,8 +418,19 @@ export interface SessionCreateParams { */ modalities?: Array<'text' | 'audio'>; + /** + * The Realtime model used for this session. + */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. */ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 88c778036..683eb5ed4 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -76,8 +76,7 @@ export interface ChatCompletion { object: 'chat.completion'; /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. + * The service tier used for processing the request. 
*/ service_tier?: 'scale' | 'default' | null; @@ -300,8 +299,7 @@ export interface ChatCompletionChunk { object: 'chat.completion.chunk'; /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. + * The service tier used for processing the request. */ service_tier?: 'scale' | 'default' | null; @@ -1115,9 +1113,6 @@ export interface ChatCompletionCreateParamsBase { * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. * - When not set, the default behavior is 'auto'. - * - * When this parameter is set, the response body will include the `service_tier` - * utilized. */ service_tier?: 'auto' | 'default' | null; diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 4b1644a68..d01ffc807 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -86,7 +86,8 @@ export interface EmbeddingCreateParams { * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 * dimensions or less. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. + * for counting tokens. Some models may also impose a limit on total number of + * tokens summed across inputs. 
*/ input: string | Array | Array | Array>; diff --git a/tests/api-resources/beta/realtime/sessions.test.ts b/tests/api-resources/beta/realtime/sessions.test.ts index 0ed998c27..dbb92ead3 100644 --- a/tests/api-resources/beta/realtime/sessions.test.ts +++ b/tests/api-resources/beta/realtime/sessions.test.ts @@ -9,8 +9,8 @@ const client = new OpenAI({ }); describe('resource sessions', () => { - test('create: only required params', async () => { - const responsePromise = client.beta.realtime.sessions.create({ model: 'gpt-4o-realtime-preview' }); + test('create', async () => { + const responsePromise = client.beta.realtime.sessions.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -19,27 +19,4 @@ describe('resource sessions', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); - - test('create: required and optional params', async () => { - const response = await client.beta.realtime.sessions.create({ - model: 'gpt-4o-realtime-preview', - input_audio_format: 'pcm16', - input_audio_transcription: { model: 'model' }, - instructions: 'instructions', - max_response_output_tokens: 0, - modalities: ['text'], - output_audio_format: 'pcm16', - temperature: 0, - tool_choice: 'tool_choice', - tools: [{ description: 'description', name: 'name', parameters: {}, type: 'function' }], - turn_detection: { - create_response: true, - prefix_padding_ms: 0, - silence_duration_ms: 0, - threshold: 0, - type: 'type', - }, - voice: 'alloy', - }); - }); }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index dfc09f69b..8f1bc7d4c 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -43,7 +43,7 @@ describe('resource completions', () => { presence_penalty: -2, reasoning_effort: 'low', response_format: { type: 'text' }, 
- seed: -9007199254740991, + seed: 0, service_tier: 'auto', stop: 'string', store: true, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 82322dc3a..c98501a87 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -32,7 +32,7 @@ describe('resource completions', () => { max_tokens: 16, n: 1, presence_penalty: -2, - seed: -9007199254740991, + seed: 0, stop: '\n', stream: false, stream_options: { include_usage: true }, From 180b9ca1b5472d7697202a9220960a948bfbb9c8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:21:16 +0000 Subject: [PATCH 102/246] release: 4.80.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b1ab5c7b9..a21d67d78 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.4" + ".": "4.80.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4254a9b8f..9126bf6a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.80.0 (2025-01-22) + +Full Changelog: [v4.79.4...v4.80.0](https://github.com/openai/openai-node/compare/v4.79.4...v4.80.0) + +### Features + +* **api:** update enum values, comments, and examples ([#1280](https://github.com/openai/openai-node/issues/1280)) ([d38f2c2](https://github.com/openai/openai-node/commit/d38f2c2648b6990f217c3c7d83ca31f3739641d3)) + ## 4.79.4 (2025-01-21) Full Changelog: [v4.79.3...v4.79.4](https://github.com/openai/openai-node/compare/v4.79.3...v4.79.4) diff --git a/jsr.json b/jsr.json index e6d772116..d79b07c2f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.4", + "version": "4.80.0", "exports": { ".": 
"./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d7a5555e5..fd85ffdd0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.4", + "version": "4.80.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e8b9601ed..c9b6787c2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.4'; // x-release-please-version +export const VERSION = '4.80.0'; // x-release-please-version From b7ab6bb304973ade94830f37eb646e800226d5ef Mon Sep 17 00:00:00 2001 From: hi019 <65871571+hi019@users.noreply.github.com> Date: Wed, 22 Jan 2025 12:57:18 -0800 Subject: [PATCH 103/246] docs: fix typo, "zodFunctionTool" -> "zodFunction" (#1128) --- helpers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers.md b/helpers.md index abf980c82..aa3775a54 100644 --- a/helpers.md +++ b/helpers.md @@ -49,7 +49,7 @@ if (message?.parsed) { The `.parse()` method will also automatically parse `function` tool calls if: -- You use the `zodFunctionTool()` helper method +- You use the `zodFunction()` helper method - You mark your tool schema with `"strict": True` For example: From 9bfb778d547c34a6b7ed4168251786b1d6723985 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 22 Jan 2025 20:34:34 +0000 Subject: [PATCH 104/246] fix(azure): include retry count header --- src/index.ts | 7 +++++-- tests/lib/azure.test.ts | 12 ++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index cf6aa89e3..944def00f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -577,7 +577,10 @@ export class AzureOpenAI extends OpenAI { this._deployment = deployment; } - override buildRequest(options: Core.FinalRequestOptions): { + override buildRequest( + options: Core.FinalRequestOptions, + props: { retryCount?: number } = 
{}, + ): { req: RequestInit; url: string; timeout: number; @@ -591,7 +594,7 @@ export class AzureOpenAI extends OpenAI { options.path = `/deployments/${model}${options.path}`; } } - return super.buildRequest(options); + return super.buildRequest(options, props); } private async _getAzureADToken(): Promise { diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 064a0098c..0e3c2c5a3 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -51,6 +51,18 @@ describe('instantiate azure client', () => { }); expect(req.headers as Headers).not.toHaveProperty('x-my-default-header'); }); + + test('includes retry count', () => { + const { req } = client.buildRequest( + { + path: '/foo', + method: 'post', + headers: { 'X-My-Default-Header': null }, + }, + { retryCount: 1 }, + ); + expect((req.headers as Headers)['x-stainless-retry-count']).toEqual('1'); + }); }); describe('defaultQuery', () => { From 654a2ac33d6b0bab723ec30ab734bbd9b693bbf3 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 22 Jan 2025 20:53:36 +0000 Subject: [PATCH 105/246] docs(helpers): fix type annotation --- helpers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers.md b/helpers.md index aa3775a54..16bc1f277 100644 --- a/helpers.md +++ b/helpers.md @@ -226,7 +226,7 @@ on in the documentation page [Message](https://platform.openai.com/docs/api-refe ```ts .on('textCreated', (content: Text) => ...) -.on('textDelta', (delta: RunStepDelta, snapshot: Text) => ...) +.on('textDelta', (delta: TextDelta, snapshot: Text) => ...) .on('textDone', (content: Text, snapshot: Message) => ...) 
``` From 3fcded9eb387e39bdf03a06b701710cf3075f990 Mon Sep 17 00:00:00 2001 From: Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com> Date: Fri, 24 Jan 2025 19:16:27 +0700 Subject: [PATCH 106/246] docs(readme): fix realtime errors docs link (#1286) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3bd386e99..012511412 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ A full example can be found [here](https://github.com/openai/openai-node/blob/ma ### Realtime error handling -When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. 
From fb61fc2db45d2fb1f25016b70608714a93a80c9d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 12:16:56 +0000 Subject: [PATCH 107/246] release: 4.80.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a21d67d78..d140407b9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.80.0" + ".": "4.80.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9126bf6a2..e4d4d73b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.80.1 (2025-01-24) + +Full Changelog: [v4.80.0...v4.80.1](https://github.com/openai/openai-node/compare/v4.80.0...v4.80.1) + +### Bug Fixes + +* **azure:** include retry count header ([3e0ba40](https://github.com/openai/openai-node/commit/3e0ba409e57ce276fb1f95cd11c801e4ccaad572)) + + +### Documentation + +* fix typo, "zodFunctionTool" -> "zodFunction" ([#1128](https://github.com/openai/openai-node/issues/1128)) ([b7ab6bb](https://github.com/openai/openai-node/commit/b7ab6bb304973ade94830f37eb646e800226d5ef)) +* **helpers:** fix type annotation ([fc019df](https://github.com/openai/openai-node/commit/fc019df1d9cc276e8f8e689742853a09aa94991a)) +* **readme:** fix realtime errors docs link ([#1286](https://github.com/openai/openai-node/issues/1286)) ([d1d50c8](https://github.com/openai/openai-node/commit/d1d50c897c18cefea964e8057fe1acfd766ae2bf)) + ## 4.80.0 (2025-01-22) Full Changelog: [v4.79.4...v4.80.0](https://github.com/openai/openai-node/compare/v4.79.4...v4.80.0) diff --git a/jsr.json b/jsr.json index d79b07c2f..e2ecad87f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.80.0", + "version": "4.80.1", "exports": { ".": "./index.ts", 
"./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index fd85ffdd0..497c7fae9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.80.0", + "version": "4.80.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c9b6787c2..7d762daed 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.80.0'; // x-release-please-version +export const VERSION = '4.80.1'; // x-release-please-version From b4bb01ddd9f1c1f6ae41ddc11a9e1b707ef04764 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Wed, 29 Jan 2025 09:45:25 -0600 Subject: [PATCH 108/246] feat(azure): Realtime API support (#1287) --- README.md | 22 ++++++++- examples/{azure.ts => azure/chat.ts} | 3 +- examples/azure/realtime/websocket.ts | 60 +++++++++++++++++++++++++ examples/azure/realtime/ws.ts | 67 ++++++++++++++++++++++++++++ examples/package.json | 1 + examples/realtime/ws.ts | 2 +- src/beta/realtime/internal-base.ts | 18 ++++++-- src/beta/realtime/websocket.ts | 54 ++++++++++++++++++++-- src/beta/realtime/ws.ts | 35 +++++++++++++-- src/index.ts | 8 ++-- 10 files changed, 251 insertions(+), 19 deletions(-) rename examples/{azure.ts => azure/chat.ts} (91%) create mode 100644 examples/azure/realtime/websocket.ts create mode 100644 examples/azure/realtime/ws.ts diff --git a/README.md b/README.md index 012511412..a1f4bf760 100644 --- a/README.md +++ b/README.md @@ -499,7 +499,7 @@ const credential = new DefaultAzureCredential(); const scope = 'https://cognitiveservices.azure.com/.default'; const azureADTokenProvider = getBearerTokenProvider(credential, scope); -const openai = new AzureOpenAI({ azureADTokenProvider }); +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); const result = await openai.chat.completions.create({ model: 'gpt-4o', @@ -509,6 +509,26 @@ 
const result = await openai.chat.completions.create({ console.log(result.choices[0]!.message?.content); ``` +### Realtime API +This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. + +To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: + +```ts +const cred = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const deploymentName = 'gpt-4o-realtime-preview-1001'; +const azureADTokenProvider = getBearerTokenProvider(cred, scope); +const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, +}); +const rt = await OpenAIRealtimeWS.azure(client); +``` + +Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. + ### Retries Certain errors will be automatically retried 2 times by default, with a short exponential backoff. diff --git a/examples/azure.ts b/examples/azure/chat.ts similarity index 91% rename from examples/azure.ts rename to examples/azure/chat.ts index 5fe1718fa..46df820f8 100755 --- a/examples/azure.ts +++ b/examples/azure/chat.ts @@ -2,6 +2,7 @@ import { AzureOpenAI } from 'openai'; import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; +import 'dotenv/config'; // Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview // Navigate to the Azure OpenAI Studio to deploy a model. @@ -13,7 +14,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope); // Make sure to set AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource. // You can find it in the Azure Portal. 
-const openai = new AzureOpenAI({ azureADTokenProvider }); +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: '2024-10-01-preview' }); async function main() { console.log('Non-streaming:'); diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts new file mode 100644 index 000000000..bec74e654 --- /dev/null +++ b/examples/azure/realtime/websocket.ts @@ -0,0 +1,60 @@ +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; +import { AzureOpenAI } from 'openai'; +import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity'; +import 'dotenv/config'; + +async function main() { + const cred = new DefaultAzureCredential(); + const scope = 'https://cognitiveservices.azure.com/.default'; + const deploymentName = 'gpt-4o-realtime-preview-1001'; + const azureADTokenProvider = getBearerTokenProvider(cred, scope); + const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, + }); + const rt = await OpenAIRealtimeWebSocket.azure(client); + + // access the underlying `ws.WebSocket` instance + rt.socket.addEventListener('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts new file mode 100644 index 000000000..ae20a1438 --- /dev/null +++ b/examples/azure/realtime/ws.ts @@ -0,0 +1,67 @@ +import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity'; +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; +import { AzureOpenAI } from 'openai'; +import 'dotenv/config'; + +async function main() { + const cred = new DefaultAzureCredential(); + const scope = 'https://cognitiveservices.azure.com/.default'; + const deploymentName = 'gpt-4o-realtime-preview-1001'; + const azureADTokenProvider = getBearerTokenProvider(cred, scope); + const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, + }); + const rt = await OpenAIRealtimeWS.azure(client); + + // access the underlying `ws.WebSocket` instance + rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a 
couple paragraphs!' }], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.on('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/package.json b/examples/package.json index b8c34ac45..70ec2c523 100644 --- a/examples/package.json +++ b/examples/package.json @@ -7,6 +7,7 @@ "private": true, "dependencies": { "@azure/identity": "^4.2.0", + "dotenv": "^16.4.7", "express": "^4.18.2", "next": "^14.1.1", "openai": "file:..", diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 4bbe85e5d..bba140800 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -9,7 +9,7 @@ async function main() { rt.send({ type: 'session.update', session: { - modalities: ['foo'] as any, + modalities: ['text'], model: 'gpt-4o-realtime-preview', }, }); diff --git a/src/beta/realtime/internal-base.ts b/src/beta/realtime/internal-base.ts index 391d69911..b704812ee 100644 --- a/src/beta/realtime/internal-base.ts +++ b/src/beta/realtime/internal-base.ts @@ -1,6 +1,7 @@ import { RealtimeClientEvent, RealtimeServerEvent, ErrorEvent } from '../../resources/beta/realtime/realtime'; import { EventEmitter } from '../../lib/EventEmitter'; import { OpenAIError } from '../../error'; +import OpenAI, { AzureOpenAI } from '../../index'; export class OpenAIRealtimeError extends OpenAIError { /** @@ -73,11 +74,20 @@ export abstract class OpenAIRealtimeEmitter extends EventEmitter } } -export function buildRealtimeURL(props: { baseURL: string; model: string 
}): URL { - const path = '/realtime'; +export function isAzure(client: Pick): client is AzureOpenAI { + return client instanceof AzureOpenAI; +} - const url = new URL(props.baseURL + (props.baseURL.endsWith('/') ? path.slice(1) : path)); +export function buildRealtimeURL(client: Pick, model: string): URL { + const path = '/realtime'; + const baseURL = client.baseURL; + const url = new URL(baseURL + (baseURL.endsWith('/') ? path.slice(1) : path)); url.protocol = 'wss'; - url.searchParams.set('model', props.model); + if (isAzure(client)) { + url.searchParams.set('api-version', client.apiVersion); + url.searchParams.set('deployment', model); + } else { + url.searchParams.set('model', model); + } return url; } diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index e0853779d..349cf5760 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -1,8 +1,8 @@ -import { OpenAI } from '../../index'; +import { AzureOpenAI, OpenAI } from '../../index'; import { OpenAIError } from '../../error'; import * as Core from '../../core'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; -import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; interface MessageEvent { data: string; @@ -26,6 +26,11 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { props: { model: string; dangerouslyAllowBrowser?: boolean; + /** + * Callback to mutate the URL, needed for Azure. 
+ * @internal + */ + onURL?: (url: URL) => void; }, client?: Pick, ) { @@ -44,11 +49,13 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { client ??= new OpenAI({ dangerouslyAllowBrowser }); - this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.url = buildRealtimeURL(client, props.model); + props.onURL?.(this.url); + // @ts-ignore this.socket = new WebSocket(this.url, [ 'realtime', - `openai-insecure-api-key.${client.apiKey}`, + ...(isAzure(client) ? [] : [`openai-insecure-api-key.${client.apiKey}`]), 'openai-beta.realtime-v1', ]); @@ -77,6 +84,45 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { this.socket.addEventListener('error', (event: any) => { this._onError(null, event.message, null); }); + + if (isAzure(client)) { + if (this.url.searchParams.get('Authorization') !== null) { + this.url.searchParams.set('Authorization', ''); + } else { + this.url.searchParams.set('api-key', ''); + } + } + } + + static async azure( + client: AzureOpenAI, + options: { deploymentName?: string; dangerouslyAllowBrowser?: boolean } = {}, + ): Promise { + const token = await client._getAzureADToken(); + function onURL(url: URL) { + if (client.apiKey !== '') { + url.searchParams.set('api-key', client.apiKey); + } else { + if (token) { + url.searchParams.set('Authorization', `Bearer ${token}`); + } else { + throw new Error('AzureOpenAI is not instantiated correctly. No API key or token provided.'); + } + } + } + const deploymentName = options.deploymentName ?? client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + const { dangerouslyAllowBrowser } = options; + return new OpenAIRealtimeWebSocket( + { + model: deploymentName, + onURL, + ...(dangerouslyAllowBrowser ? 
{ dangerouslyAllowBrowser } : {}), + }, + client, + ); } send(event: RealtimeClientEvent) { diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 631a36cd2..51339089c 100644 --- a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -1,7 +1,7 @@ import * as WS from 'ws'; -import { OpenAI } from '../../index'; +import { AzureOpenAI, OpenAI } from '../../index'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; -import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { url: URL; @@ -14,12 +14,12 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { super(); client ??= new OpenAI(); - this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.url = buildRealtimeURL(client, props.model); this.socket = new WS.WebSocket(this.url, { ...props.options, headers: { ...props.options?.headers, - Authorization: `Bearer ${client.apiKey}`, + ...(isAzure(client) ? {} : { Authorization: `Bearer ${client.apiKey}` }), 'OpenAI-Beta': 'realtime=v1', }, }); @@ -51,6 +51,20 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { }); } + static async azure( + client: AzureOpenAI, + options: { deploymentName?: string; options?: WS.ClientOptions | undefined } = {}, + ): Promise { + const deploymentName = options.deploymentName ?? 
client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + return new OpenAIRealtimeWS( + { model: deploymentName, options: { headers: await getAzureHeaders(client) } }, + client, + ); + } + send(event: RealtimeClientEvent) { try { this.socket.send(JSON.stringify(event)); @@ -67,3 +81,16 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } } } + +async function getAzureHeaders(client: AzureOpenAI) { + if (client.apiKey !== '') { + return { 'api-key': client.apiKey }; + } else { + const token = await client._getAzureADToken(); + if (token) { + return { Authorization: `Bearer ${token}` }; + } else { + throw new Error('AzureOpenAI is not instantiated correctly. No API key or token provided.'); + } + } +} diff --git a/src/index.ts b/src/index.ts index 944def00f..3de224d90 100644 --- a/src/index.ts +++ b/src/index.ts @@ -491,7 +491,7 @@ export interface AzureClientOptions extends ClientOptions { /** API Client for interfacing with the Azure OpenAI API. */ export class AzureOpenAI extends OpenAI { private _azureADTokenProvider: (() => Promise) | undefined; - private _deployment: string | undefined; + deploymentName: string | undefined; apiVersion: string = ''; /** * API Client for interfacing with the Azure OpenAI API. 
@@ -574,7 +574,7 @@ export class AzureOpenAI extends OpenAI { this._azureADTokenProvider = azureADTokenProvider; this.apiVersion = apiVersion; - this._deployment = deployment; + this.deploymentName = deployment; } override buildRequest( @@ -589,7 +589,7 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this._deployment || options.body['model']; + const model = this.deploymentName || options.body['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } @@ -597,7 +597,7 @@ export class AzureOpenAI extends OpenAI { return super.buildRequest(options, props); } - private async _getAzureADToken(): Promise { + async _getAzureADToken(): Promise { if (typeof this._azureADTokenProvider === 'function') { const token = await this._azureADTokenProvider(); if (!token || typeof token !== 'string') { From 6f89573f9b334960195b074e17ad70df32329e8e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:45:55 +0000 Subject: [PATCH 109/246] release: 4.81.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d140407b9..de35570a8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.80.1" + ".": "4.81.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e4d4d73b7..b24c0869d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.81.0 (2025-01-29) + +Full Changelog: [v4.80.1...v4.81.0](https://github.com/openai/openai-node/compare/v4.80.1...v4.81.0) + +### Features + +* **azure:** Realtime API support 
([#1287](https://github.com/openai/openai-node/issues/1287)) ([fe090c0](https://github.com/openai/openai-node/commit/fe090c0a57570217eb0b431e2cce40bf61de2b75)) + ## 4.80.1 (2025-01-24) Full Changelog: [v4.80.0...v4.80.1](https://github.com/openai/openai-node/compare/v4.80.0...v4.80.1) diff --git a/jsr.json b/jsr.json index e2ecad87f..18d000862 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.80.1", + "version": "4.81.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 497c7fae9..07faa0019 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.80.1", + "version": "4.81.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7d762daed..3b4d4eee5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.80.1'; // x-release-please-version +export const VERSION = '4.81.0'; // x-release-please-version From a0519f5882e4ed1df388f5c7014a6e0d408cdc40 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Fri, 31 Jan 2025 04:26:47 -0600 Subject: [PATCH 110/246] fix(examples/realtime): remove duplicate `session.update` call (#1293) --- examples/azure/realtime/ws.ts | 7 ------- examples/realtime/ws.ts | 7 ------- 2 files changed, 14 deletions(-) diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index ae20a1438..6ab7b742a 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -25,13 +25,6 @@ async function main() { model: 'gpt-4o-realtime-preview', }, }); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); rt.send({ type: 'conversation.item.create', diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index bba140800..08c6fbcb6 100644 --- 
a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -13,13 +13,6 @@ async function main() { model: 'gpt-4o-realtime-preview', }, }); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); rt.send({ type: 'conversation.item.create', From 608200f7cfdeca079a9a6457f9c306baf96c4712 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:08:33 +0000 Subject: [PATCH 111/246] feat(api): add o3-mini (#1295) fix(types): correct metadata type + other fixes --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/audio/transcriptions.ts | 4 +- src/resources/batches.ts | 20 ++-- src/resources/beta/assistants.ts | 42 +++++--- src/resources/beta/realtime/realtime.ts | 89 ++++++++++++++-- src/resources/beta/realtime/sessions.ts | 35 ++++-- src/resources/beta/threads/messages.ts | 31 +++--- src/resources/beta/threads/runs/runs.ts | 39 ++++--- src/resources/beta/threads/runs/steps.ts | 11 +- src/resources/beta/threads/threads.ts | 100 +++++++++++------- .../beta/vector-stores/vector-stores.ts | 31 +++--- src/resources/chat/chat.ts | 2 + src/resources/chat/completions.ts | 14 ++- src/resources/shared.ts | 10 ++ src/resources/uploads/uploads.ts | 2 +- tests/api-resources/beta/assistants.test.ts | 6 +- .../beta/threads/messages.test.ts | 2 +- .../beta/threads/runs/runs.test.ts | 4 +- .../beta/threads/threads.test.ts | 18 ++-- 21 files changed, 320 insertions(+), 144 deletions(-) diff --git a/.stats.yml b/.stats.yml index d518bac58..e49b5c56e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml 
diff --git a/api.md b/api.md index 33ab95ef6..516188b20 100644 --- a/api.md +++ b/api.md @@ -5,6 +5,7 @@ Types: - ErrorObject - FunctionDefinition - FunctionParameters +- Metadata - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText diff --git a/src/index.ts b/src/index.ts index 3de224d90..f860579d3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -451,6 +451,7 @@ export declare namespace OpenAI { export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; + export type Metadata = API.Metadata; export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 0b6da4620..6d0a07e1e 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -166,8 +166,8 @@ export interface TranscriptionCreateParams< /** * The language of the input audio. Supplying the input language in - * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - * improve accuracy and latency. + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. 
*/ language?: string; diff --git a/src/resources/batches.ts b/src/resources/batches.ts index ec5ca6331..aadda83a6 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -4,6 +4,7 @@ import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import * as Core from '../core'; import * as BatchesAPI from './batches'; +import * as Shared from './shared'; import { CursorPage, type CursorPageParams } from '../pagination'; export class Batches extends APIResource { @@ -138,11 +139,13 @@ export interface Batch { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the file containing the outputs of successfully executed requests. @@ -237,9 +240,14 @@ export interface BatchCreateParams { input_file_id: string; /** - * Optional custom metadata for the batch. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: Record | null; + metadata?: Shared.Metadata | null; } export interface BatchListParams extends CursorPageParams {} diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0e657b1d4..69a5db520 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -111,11 +111,13 @@ export interface Assistant { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * ID of the model to use. You can use the @@ -1118,11 +1120,13 @@ export interface AssistantCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the assistant. The maximum length is 256 characters. @@ -1242,12 +1246,14 @@ export namespace AssistantCreateParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. 
This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } @@ -1267,11 +1273,13 @@ export interface AssistantUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * ID of the model to use. You can use the diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 0fb66eb49..c666221e1 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2,6 +2,7 @@ import { APIResource } from '../../../resource'; import * as RealtimeAPI from './realtime'; +import * as Shared from '../../shared'; import * as SessionsAPI from './sessions'; import { Session as SessionsAPISession, @@ -741,9 +742,38 @@ export interface RealtimeResponse { id?: string; /** - * Developer-provided string key-value pairs associated with this response. 
+ * Which conversation the response is added to, determined by the `conversation` + * field in the `response.create` event. If `auto`, the response will be added to + * the default conversation and the value of `conversation_id` will be an id like + * `conv_1234`. If `none`, the response will not be added to any conversation and + * the value of `conversation_id` will be `null`. If responses are being triggered + * by server VAD, the response will be added to the default conversation, thus the + * `conversation_id` will be an id like `conv_1234`. */ - metadata?: unknown | null; + conversation_id?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls, that was used in this response. + */ + max_output_tokens?: number | 'inf'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The set of modalities the model used to respond. If there are multiple + * modalities, the model will pick one, for example if `modalities` is + * `["text", "audio"]`, the model could be responding in either text or audio. + */ + modalities?: Array<'text' | 'audio'>; /** * The object type, must be `realtime.response`. @@ -755,6 +785,11 @@ export interface RealtimeResponse { */ output?: Array; + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** * The final status of the response (`completed`, `cancelled`, `failed`, or * `incomplete`). 
@@ -766,6 +801,11 @@ export interface RealtimeResponse { */ status_details?: RealtimeResponseStatus; + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + /** * Usage statistics for the Response, this will correspond to billing. A Realtime * API session will maintain a conversation context and append new Items to the @@ -773,6 +813,12 @@ export interface RealtimeResponse { * become the input for later turns. */ usage?: RealtimeResponseUsage; + + /** + * The voice the model used to respond. Current voice options are `alloy`, `ash`, + * `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** @@ -1320,11 +1366,13 @@ export namespace ResponseCreateEvent { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maximum of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The set of modalities the model can respond with. To disable audio, set this to @@ -1716,8 +1764,11 @@ export namespace SessionUpdateEvent { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. 
+ * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -1801,15 +1852,33 @@ export namespace SessionUpdateEvent { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + /** * The model to use for transcription, `whisper-1` is the only currently supported * model. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. 
+ */ + prompt?: string; } export interface Tool { diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 68c48db59..d2afa25b1 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -203,7 +203,7 @@ export interface SessionCreateResponse { /** * Ephemeral key returned by the API. */ - client_secret?: SessionCreateResponse.ClientSecret; + client_secret: SessionCreateResponse.ClientSecret; /** * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. @@ -292,14 +292,14 @@ export namespace SessionCreateResponse { * Timestamp for when the token expires. Currently, all tokens expire after one * minute. */ - expires_at?: number; + expires_at: number; /** * Ephemeral key usable in client environments to authenticate connections to the * Realtime API. Use this in client-side environments rather than a standard API * token, which should only be used server-side. */ - value?: string; + value: string; } /** @@ -385,8 +385,11 @@ export interface SessionCreateParams { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. 
*/ input_audio_transcription?: SessionCreateParams.InputAudioTranscription; @@ -470,15 +473,33 @@ export namespace SessionCreateParams { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + /** * The model to use for transcription, `whisper-1` is the only currently supported * model. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. 
+ */ + prompt?: string; } export interface Tool { diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 8124f56cd..29fd2b29f 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -407,11 +408,13 @@ export interface Message { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.message`. @@ -660,11 +663,13 @@ export interface MessageCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace MessageCreateParams { @@ -693,11 +698,13 @@ export namespace MessageCreateParams { export interface MessageUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface MessageListParams extends CursorPageParams { diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 814ad3e89..84ba7b63c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -8,6 +8,7 @@ import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/Assi import { sleep } from '../../../../core'; import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; import * as RunsAPI from './runs'; +import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; @@ -415,11 +416,13 @@ export interface Run { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The model that the @@ -705,10 +708,12 @@ export interface RunCreateParamsBase { /** * Body param: Set of 16 key-value pairs that can be attached to an object. This * can be useful for storing additional information about the object in a - * structured format. Keys can be a maximum of 64 characters long and values can be - * a maxium of 512 characters long. + * structured format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * Body param: The ID of the @@ -823,11 +828,13 @@ export namespace RunCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace AdditionalMessage { @@ -898,11 +905,13 @@ export interface RunCreateParamsStreaming extends RunCreateParamsBase { export interface RunUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface RunListParams extends CursorPageParams { diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 6c6722b62..c491b4e83 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../../../resource'; import { isRequestOptions } from '../../../../core'; import * as Core from '../../../../core'; import * as StepsAPI from './steps'; +import * as Shared from '../../../shared'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { @@ -515,11 +516,13 @@ export interface RunStep { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.run.step`. 
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 453d8fa10..3f69c6e60 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -250,11 +250,13 @@ export interface Thread { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread`. @@ -322,11 +324,13 @@ export interface ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -361,11 +365,13 @@ export namespace ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -447,12 +453,14 @@ export namespace ThreadCreateParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } @@ -461,11 +469,13 @@ export namespace ThreadCreateParams { export interface ThreadUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -549,11 +559,13 @@ export interface ThreadCreateAndRunParamsBase { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -609,7 +621,8 @@ export interface ThreadCreateAndRunParamsBase { temperature?: number | null; /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ thread?: ThreadCreateAndRunParams.Thread; @@ -658,7 +671,8 @@ export interface ThreadCreateAndRunParamsBase { export namespace ThreadCreateAndRunParams { /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ export interface Thread { /** @@ -669,11 +683,13 @@ export namespace ThreadCreateAndRunParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -708,11 +724,13 @@ export namespace ThreadCreateAndRunParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -794,12 +812,14 @@ export namespace ThreadCreateAndRunParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index cbff2d562..8438b79da 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as Shared from '../../shared'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCreateParams, @@ -187,11 +188,13 @@ export interface VectorStore { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The name of the vector store. @@ -300,11 +303,13 @@ export interface VectorStoreCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store. 
@@ -338,11 +343,13 @@ export interface VectorStoreUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store. diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 2230b19bd..d4a18929c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -46,6 +46,8 @@ export class Chat extends APIResource { } export type ChatModel = + | 'o3-mini' + | 'o3-mini-2025-01-31' | 'o1' | 'o1-2024-12-17' | 'o1-preview' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 683eb5ed4..d2de11458 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1012,10 +1012,14 @@ export interface ChatCompletionCreateParamsBase { max_tokens?: number | null; /** - * Developer-defined tags and values used for filtering completions in the - * [dashboard](https://platform.openai.com/chat-completions). + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: Record | null; + metadata?: Shared.Metadata | null; /** * Output types that you would like the model to generate for this request. 
Most @@ -1109,9 +1113,9 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarentee. + * latency guarantee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarentee. + * tier with a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. */ service_tier?: 'auto' | 'default' | null; diff --git a/src/resources/shared.ts b/src/resources/shared.ts index f44fda8a7..3bb11582f 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -55,6 +55,16 @@ export interface FunctionDefinition { */ export type FunctionParameters = Record; +/** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ +export type Metadata = Record; + export interface ResponseFormatJSONObject { /** * The type of response format being defined: `json_object` diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 8491d0fe2..bfe752cd7 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -113,7 +113,7 @@ export interface Upload { status: 'pending' | 'completed' | 'cancelled' | 'expired'; /** - * The ready File object after the Upload is completed. + * The `File` object represents a document that has been uploaded to OpenAI. 
*/ file?: FilesAPI.FileObject | null; } diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index a64465c77..88a10ba8f 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -25,7 +25,7 @@ describe('resource assistants', () => { model: 'gpt-4o', description: 'description', instructions: 'instructions', - metadata: {}, + metadata: { foo: 'string' }, name: 'name', response_format: 'auto', temperature: 1, @@ -33,7 +33,9 @@ describe('resource assistants', () => { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, tools: [{ type: 'code_interpreter' }], diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index c1f5f7b6e..e125edd84 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -28,7 +28,7 @@ describe('resource messages', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 4fd8261ac..9b728403f 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -30,13 +30,13 @@ describe('resource runs', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, + metadata: { 
foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, response_format: 'auto', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index aba266316..f26d6ec44 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -37,15 +37,17 @@ describe('resource threads', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, }, @@ -118,7 +120,7 @@ describe('resource threads', () => { instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, + metadata: { foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, response_format: 'auto', @@ -130,15 +132,17 @@ describe('resource threads', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, }, From 145ff671d3a8111c81497f6bc9cd0cb5053a6cb0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:09:24 +0000 Subject: 
[PATCH 112/246] release: 4.82.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index de35570a8..b2ee58e08 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.81.0" + ".": "4.82.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b24c0869d..7565cb01a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.82.0 (2025-01-31) + +Full Changelog: [v4.81.0...v4.82.0](https://github.com/openai/openai-node/compare/v4.81.0...v4.82.0) + +### Features + +* **api:** add o3-mini ([#1295](https://github.com/openai/openai-node/issues/1295)) ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + + +### Bug Fixes + +* **examples/realtime:** remove duplicate `session.update` call ([#1293](https://github.com/openai/openai-node/issues/1293)) ([ad800b4](https://github.com/openai/openai-node/commit/ad800b4f9410c6838994c24a3386ea708717f72b)) +* **types:** correct metadata type + other fixes ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + ## 4.81.0 (2025-01-29) Full Changelog: [v4.80.1...v4.81.0](https://github.com/openai/openai-node/compare/v4.80.1...v4.81.0) diff --git a/jsr.json b/jsr.json index 18d000862..7569332ce 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.81.0", + "version": "4.82.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 07faa0019..42e00822d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.81.0", + "version": "4.82.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": 
"dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3b4d4eee5..07241a8cf 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.81.0'; // x-release-please-version +export const VERSION = '4.82.0'; // x-release-please-version From 7cf2a8571fb3c40ce3e67759af314e37bc3467e0 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Mon, 3 Feb 2025 11:22:06 -0600 Subject: [PATCH 113/246] fix(azure/audio): use model param for deployments (#1297) --- src/core.ts | 2 ++ src/index.ts | 2 +- src/resources/audio/transcriptions.ts | 5 ++++- src/resources/audio/translations.ts | 5 ++++- tests/lib/azure.test.ts | 10 ++++++---- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/core.ts b/src/core.ts index 3d2d029a5..23d19b5bd 100644 --- a/src/core.ts +++ b/src/core.ts @@ -814,6 +814,7 @@ export type RequestOptions< signal?: AbortSignal | undefined | null; idempotencyKey?: string; + __metadata?: Record; __binaryRequest?: boolean | undefined; __binaryResponse?: boolean | undefined; __streamClass?: typeof Stream; @@ -836,6 +837,7 @@ const requestOptionsKeys: KeysEnum = { signal: true, idempotencyKey: true, + __metadata: true, __binaryRequest: true, __binaryResponse: true, __streamClass: true, diff --git a/src/index.ts b/src/index.ts index f860579d3..f4e940af8 100644 --- a/src/index.ts +++ b/src/index.ts @@ -590,7 +590,7 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this.deploymentName || options.body['model']; + const model = this.deploymentName || options.body['model'] || options.__metadata?.['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 6d0a07e1e..d0e671243 100644 --- a/src/resources/audio/transcriptions.ts +++ 
b/src/resources/audio/transcriptions.ts @@ -25,7 +25,10 @@ export class Transcriptions extends APIResource { body: TranscriptionCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post( + '/audio/transcriptions', + Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + ); } } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index c6bf7c870..0621deecb 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -26,7 +26,10 @@ export class Translations extends APIResource { body: TranslationCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post( + '/audio/translations', + Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + ); } } diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 0e3c2c5a3..430efbe57 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -495,21 +495,23 @@ describe('azure request building', () => { ); }); - test('Audio translations is not handled', async () => { + test('handles audio translations', async () => { const { url } = (await client.audio.translations.create({ model: deployment, file: { url: 'https://example.com/', blob: () => 0 as any }, })) as any; - expect(url).toStrictEqual(`https://example.com/openai/audio/translations?api-version=${apiVersion}`); + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`, + ); }); - test('Audio transcriptions is not handled', async () => { + test('handles audio transcriptions', async () => { const { url } = (await client.audio.transcriptions.create({ model: 
deployment, file: { url: 'https://example.com/', blob: () => 0 as any }, })) as any; expect(url).toStrictEqual( - `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`, + `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`, ); }); From 29a86274c3965826e132373fccbea430efb3bacd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 18:39:40 +0000 Subject: [PATCH 114/246] feat(client): send `X-Stainless-Timeout` header (#1299) --- src/core.ts | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/core.ts b/src/core.ts index 23d19b5bd..6578c0781 100644 --- a/src/core.ts +++ b/src/core.ts @@ -315,6 +315,7 @@ export abstract class APIClient { options: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: RequestInit; url: string; timeout: number } { + options = { ...options }; const { method, path, query, headers: headers = {} } = options; const body = @@ -327,9 +328,9 @@ export abstract class APIClient { const url = this.buildURL(path!, query); if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); - const timeout = options.timeout ?? this.timeout; + options.timeout = options.timeout ?? this.timeout; const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url); - const minAgentTimeout = timeout + 1000; + const minAgentTimeout = options.timeout + 1000; if ( typeof (httpAgent as any)?.options?.timeout === 'number' && minAgentTimeout > ((httpAgent as any).options.timeout ?? 0) @@ -358,7 +359,7 @@ export abstract class APIClient { signal: options.signal ?? 
null,
     };
 
-    return { req, url, timeout };
+    return { req, url, timeout: options.timeout };
   }
 
   private buildHeaders({
@@ -386,15 +387,22 @@ export abstract class APIClient {
       delete reqHeaders['content-type'];
     }
 
-    // Don't set the retry count header if it was already set or removed through default headers or by the
-    // caller. We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to
-    // account for the removal case.
+    // Don't set these headers if they were already set or removed through default headers or by the caller.
+    // We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to account
+    // for the removal case.
     if (
       getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined &&
       getHeader(headers, 'x-stainless-retry-count') === undefined
     ) {
       reqHeaders['x-stainless-retry-count'] = String(retryCount);
     }
+    if (
+      getHeader(defaultHeaders, 'x-stainless-timeout') === undefined &&
+      getHeader(headers, 'x-stainless-timeout') === undefined &&
+      options.timeout
+    ) {
+      reqHeaders['x-stainless-timeout'] = String(options.timeout);
+    }
 
     this.validateHeaders(reqHeaders, headers);

From bcf459fb5594d3d7198d95c5569cac4aa6bd483e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 5 Feb 2025 11:26:36 +0000
Subject: [PATCH 115/246] fix(api/types): correct audio duration & role types (#1300)

---
 .stats.yml                              |  2 +-
 api.md                                  |  1 +
 src/lib/ChatCompletionStream.ts         |  3 +-
 src/resources/audio/transcriptions.ts   |  2 +-
 src/resources/audio/translations.ts     |  2 +-
 src/resources/beta/realtime/realtime.ts | 79 +++++++++++++++++++++++--
 src/resources/chat/completions.ts       |  4 +-
 7 files changed, 83 insertions(+), 10 deletions(-)

diff --git a/.stats.yml b/.stats.yml
index e49b5c56e..df7877dfd 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 69
-openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml diff --git a/api.md b/api.md index 516188b20..01854a8e0 100644 --- a/api.md +++ b/api.md @@ -229,6 +229,7 @@ Types: - ConversationItemInputAudioTranscriptionFailedEvent - ConversationItemTruncateEvent - ConversationItemTruncatedEvent +- ConversationItemWithReference - ErrorEvent - InputAudioBufferAppendEvent - InputAudioBufferClearEvent diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index a88f8a23b..6c846f70b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -12,6 +12,7 @@ import { type ChatCompletionCreateParams, type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, + type ChatCompletionRole, } from '../resources/chat/completions'; import { AbstractChatCompletionRunner, @@ -797,7 +798,7 @@ export namespace ChatCompletionSnapshot { /** * The role of the author of this message. */ - role?: 'system' | 'user' | 'assistant' | 'function' | 'tool'; + role?: ChatCompletionRole; } export namespace Message { diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index d0e671243..6fbe96b58 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -106,7 +106,7 @@ export interface TranscriptionVerbose { /** * The duration of the input audio. */ - duration: string; + duration: number; /** * The language of the input audio. diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 0621deecb..dac519ede 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -41,7 +41,7 @@ export interface TranslationVerbose { /** * The duration of the input audio. 
*/ - duration: string; + duration: number; /** * The language of the output translation (always `english`). diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index c666221e1..e46dcdaaf 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -439,6 +439,76 @@ export interface ConversationItemTruncatedEvent { type: 'conversation.item.truncated'; } +/** + * The item to add to the conversation. + */ +export interface ConversationItemWithReference { + /** + * For an item of type (`message` | `function_call` | `function_call_output`) this + * field allows the client to assign the unique ID of the item. It is not required + * because the server will generate one if not provided. + * + * For an item of type `item_reference`, this field is required and is a reference + * to any item that has previously existed in the conversation. + */ + id?: string; + + /** + * The arguments of the function call (for `function_call` items). + */ + arguments?: string; + + /** + * The ID of the function call (for `function_call` and `function_call_output` + * items). If passed on a `function_call_output` item, the server will check that a + * `function_call` item with the same ID exists in the conversation history. + */ + call_id?: string; + + /** + * The content of the message, applicable for `message` items. + * + * - Message items of role `system` support only `input_text` content + * - Message items of role `user` support `input_text` and `input_audio` content + * - Message items of role `assistant` support `text` content. + */ + content?: Array; + + /** + * The name of the function being called (for `function_call` items). + */ + name?: string; + + /** + * Identifier for the API object being returned - always `realtime.item`. + */ + object?: 'realtime.item'; + + /** + * The output of the function call (for `function_call_output` items). 
+ */ + output?: string; + + /** + * The role of the message sender (`user`, `assistant`, `system`), only applicable + * for `message` items. + */ + role?: 'user' | 'assistant' | 'system'; + + /** + * The status of the item (`completed`, `incomplete`). These have no effect on the + * conversation, but are accepted for consistency with the + * `conversation.item.created` event. + */ + status?: 'completed' | 'incomplete'; + + /** + * The type of the item (`message`, `function_call`, `function_call_output`, + * `item_reference`). + */ + type?: 'message' | 'function_call' | 'function_call_output' | 'item_reference'; +} + /** * Returned when an error occurs, which could be a client problem or a server * problem. Most errors are recoverable and the session will stay open, we @@ -1336,11 +1406,12 @@ export namespace ResponseCreateEvent { conversation?: (string & {}) | 'auto' | 'none'; /** - * Input items to include in the prompt for the model. Creates a new context for - * this response, without including the default conversation. Can include - * references to items from the default conversation. + * Input items to include in the prompt for the model. Using this field creates a + * new context for this Response instead of using the default conversation. An + * empty array `[]` will clear the context for this Response. Note that this can + * include references to items from the default conversation. */ - input?: Array; + input?: Array; /** * The default system instructions (i.e. system message) prepended to model calls. diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index d2de11458..55b008cf0 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -371,7 +371,7 @@ export namespace ChatCompletionChunk { /** * The role of the author of this message. 
*/ - role?: 'system' | 'user' | 'assistant' | 'tool'; + role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool'; tool_calls?: Array; } @@ -756,7 +756,7 @@ export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; /** * The role of the author of a message */ -export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function'; +export type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function'; /** * Options for streaming response. Only set this when you set `stream: true`. From 41a7ce315f3ee4495ae259d9bbed77701dc52430 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 12:26:03 +0000 Subject: [PATCH 116/246] release: 4.83.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b2ee58e08..6eb0f130e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.82.0" + ".": "4.83.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7565cb01a..f61def5e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.83.0 (2025-02-05) + +Full Changelog: [v4.82.0...v4.83.0](https://github.com/openai/openai-node/compare/v4.82.0...v4.83.0) + +### Features + +* **client:** send `X-Stainless-Timeout` header ([#1299](https://github.com/openai/openai-node/issues/1299)) ([ddfc686](https://github.com/openai/openai-node/commit/ddfc686f43a3420c3adf8dec2e82b4d10a121eb8)) + + +### Bug Fixes + +* **api/types:** correct audio duration & role types ([#1300](https://github.com/openai/openai-node/issues/1300)) ([a955ac2](https://github.com/openai/openai-node/commit/a955ac2bf5bee663d530d0c82b0005bf3ce6fc47)) +* **azure/audio:** use model param for deployments 
([#1297](https://github.com/openai/openai-node/issues/1297)) ([85de382](https://github.com/openai/openai-node/commit/85de382db17cbe5f112650e79d0fc1cc841efbb2)) + ## 4.82.0 (2025-01-31) Full Changelog: [v4.81.0...v4.82.0](https://github.com/openai/openai-node/compare/v4.81.0...v4.82.0) diff --git a/jsr.json b/jsr.json index 7569332ce..6fa05e624 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.82.0", + "version": "4.83.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 42e00822d..bd507e9f8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.82.0", + "version": "4.83.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 07241a8cf..13c764d7d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.82.0'; // x-release-please-version +export const VERSION = '4.83.0'; // x-release-please-version From 2a43456b2e085f79ff3ebebdfa55c65f68dfbe56 Mon Sep 17 00:00:00 2001 From: Minh Anh Date: Wed, 5 Feb 2025 11:29:45 -0800 Subject: [PATCH 117/246] Fix Azure OpenAI client import --- src/beta/realtime/websocket.ts | 2 +- src/beta/realtime/ws.ts | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index 349cf5760..e8143fdbf 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -95,7 +95,7 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { } static async azure( - client: AzureOpenAI, + client: Pick, options: { deploymentName?: string; dangerouslyAllowBrowser?: boolean } = {}, ): Promise { const token = await client._getAzureADToken(); diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 51339089c..3f51dfc4b 100644 --- 
a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -52,7 +52,7 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } static async azure( - client: AzureOpenAI, + client: Pick, options: { deploymentName?: string; options?: WS.ClientOptions | undefined } = {}, ): Promise { const deploymentName = options.deploymentName ?? client.deploymentName; @@ -82,7 +82,7 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } } -async function getAzureHeaders(client: AzureOpenAI) { +async function getAzureHeaders(client: Pick) { if (client.apiKey !== '') { return { 'api-key': client.apiKey }; } else { From 27d354a363d3c8dc5056bd28f8f1073757046f48 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 21:25:41 +0000 Subject: [PATCH 118/246] fix(api): add missing reasoning effort + model enums (#1302) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 51 ++++++++++++++++++- src/resources/beta/threads/runs/runs.ts | 10 ++++ src/resources/chat/completions.ts | 8 +-- tests/api-resources/beta/assistants.test.ts | 1 + .../beta/threads/runs/runs.test.ts | 1 + 6 files changed, 67 insertions(+), 6 deletions(-) diff --git a/.stats.yml b/.stats.yml index df7877dfd..8a5d2c06b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 69a5db520..0cc63d691 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1133,6 +1133,16 @@ export interface AssistantCreateParams { */ name?: string | null; + /** + * **o1 and o3-mini models only** + * + 
* Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1288,13 +1298,52 @@ export interface AssistantUpdateParams { * [Model overview](https://platform.openai.com/docs/models) for descriptions of * them. */ - model?: string; + model?: + | (string & {}) + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; /** * The name of the assistant. The maximum length is 256 characters. */ name?: string | null; + /** + * **o1 and o3-mini models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Specifies the format that the model must output. 
Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 84ba7b63c..8ab94cc99 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -731,6 +731,16 @@ export interface RunCreateParamsBase { */ parallel_tool_calls?: boolean; + /** + * Body param: **o1 and o3-mini models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Body param: Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 55b008cf0..2586845c3 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -744,14 +744,14 @@ export interface ChatCompletionPredictionContent { } /** - * **o1 models only** + * **o1 and o3-mini models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. 
*/ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; +export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; /** * The role of the author of a message @@ -1063,14 +1063,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 models only** + * **o1 and o3-mini models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort; + reasoning_effort?: ChatCompletionReasoningEffort | null; /** * An object specifying the format that the model must output. diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 88a10ba8f..16bc9f942 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -27,6 +27,7 @@ describe('resource assistants', () => { instructions: 'instructions', metadata: { foo: 'string' }, name: 'name', + reasoning_effort: 'low', response_format: 'auto', temperature: 1, tool_resources: { diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 9b728403f..13ae89a00 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -39,6 +39,7 @@ describe('resource runs', () => { metadata: { foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, + reasoning_effort: 'low', response_format: 'auto', stream: false, temperature: 1, From f44641236e9f90758c535cc948d5734ae20fd5a5 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 5 Feb 2025 20:33:57 +0000 Subject: [PATCH 119/246] docs(readme): cleanup into multiple files --- README.md | 421 
+++++++++++----------------------------------------- azure.md | 49 ++++++ helpers.md | 122 ++++++++++----- realtime.md | 87 +++++++++++ 4 files changed, 313 insertions(+), 366 deletions(-) create mode 100644 azure.md create mode 100644 realtime.md diff --git a/README.md b/README.md index a1f4bf760..166e35e22 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ import OpenAI from 'jsr:@openai/openai'; The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. -```js +```ts import OpenAI from 'openai'; const client = new OpenAI({ @@ -80,189 +80,11 @@ async function main() { main(); ``` -If you need to cancel a stream, you can `break` from the loop -or call `stream.controller.abort()`. - -## Realtime API beta - -The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. - -The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). - -This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). 
- -Basic text based example with `ws`: - -```ts -// requires `yarn add ws @types/ws` -import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; - -const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); - -// access the underlying `ws.WebSocket` instance -rt.socket.on('open', () => { - console.log('Connection opened!'); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); - - rt.send({ - type: 'conversation.item.create', - item: { - type: 'message', - role: 'user', - content: [{ type: 'input_text', text: 'Say a couple paragraphs!' }], - }, - }); - - rt.send({ type: 'response.create' }); -}); - -rt.on('error', (err) => { - // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors - throw err; -}); - -rt.on('session.created', (event) => { - console.log('session created!', event.session); - console.log(); -}); - -rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); -rt.on('response.text.done', () => console.log()); - -rt.on('response.done', () => rt.close()); - -rt.socket.on('close', () => console.log('\nConnection closed!')); -``` - -To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access: - -```ts -import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; - -const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); -// ... -rt.socket.addEventListener('open', () => { - // ... -}); -``` - -A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts). 
- -### Realtime error handling - -When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. - -It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. - -```ts -const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); -rt.on('error', (err) => { - // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors - throw err; -}); -``` - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: - - -```ts -import OpenAI from 'openai'; - -const client = new OpenAI({ - apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted -}); - -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); -} - -main(); -``` - -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. - -> [!IMPORTANT] -> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). - -### Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. 
The SDK includes -helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in 'AndPoll'. - -For instance to create a Run and poll until it reaches a terminal state you can run: - -```ts -const run = await openai.beta.threads.runs.createAndPoll(thread.id, { - assistant_id: assistantId, -}); -``` - -More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/deep-dive/run-lifecycle) - -### Bulk Upload Helpers - -When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. -For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. - -```ts -const fileList = [ - createReadStream('/home/data/example.pdf'), - ... -]; - -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); -``` - -### Streaming Helpers - -The SDK also includes helpers to process streams and handle the incoming events. 
- -```ts -const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) - .on('textCreated', (text) => process.stdout.write('\nassistant > ')) - .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) - .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) - .on('toolCallDelta', (toolCallDelta, snapshot) => { - if (toolCallDelta.type === 'code_interpreter') { - if (toolCallDelta.code_interpreter.input) { - process.stdout.write(toolCallDelta.code_interpreter.input); - } - if (toolCallDelta.code_interpreter.outputs) { - process.stdout.write('\noutput >\n'); - toolCallDelta.code_interpreter.outputs.forEach((output) => { - if (output.type === 'logs') { - process.stdout.write(`\n${output.logs}\n`); - } - }); - } - } - }); -``` - -More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) +If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. -### Streaming responses +### Chat Completion streaming helpers -This library provides several conveniences for streaming chat completions, for example: +This library also provides several conveniences for streaming chat completions, for example: ```ts import OpenAI from 'openai'; @@ -292,98 +114,32 @@ async function main() { main(); ``` -Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#chat-events) including event handlers and promises. - -Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` -which only returns an async iterable of the chunks in the stream and thus uses less memory -(it does not build up a final chat completion object for you). - -If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`. - -### Automated function calls +See [helpers.md](helpers.md#chat-events) for more details. 
-We provide the `openai.beta.chat.completions.runTools({…})` -convenience helper for using function tool calls with the `/chat/completions` endpoint -which automatically call the JavaScript functions you provide -and sends their results back to the `/chat/completions` endpoint, -looping as long as the model requests tool calls. - -If you pass a `parse` function, it will automatically parse the `arguments` for you -and returns any parsing errors to the model to attempt auto-recovery. -Otherwise, the args will be passed to the function you provide as a string. +### Request & Response types -If you pass `tool_choice: {function: {name: …}}` instead of `auto`, -it returns immediately after calling that function (and only loops to auto-recover parsing errors). +This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: + ```ts import OpenAI from 'openai'; -const client = new OpenAI(); +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); async function main() { - const runner = client.beta.chat.completions - .runTools({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'How is the weather this week?' }], - tools: [ - { - type: 'function', - function: { - function: getCurrentLocation, - parameters: { type: 'object', properties: {} }, - }, - }, - { - type: 'function', - function: { - function: getWeather, - parse: JSON.parse, // or use a validation library like zod for typesafe parsing. 
- parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - }, - }, - }, - ], - }) - .on('message', (message) => console.log(message)); - - const finalContent = await runner.finalContent(); - console.log(); - console.log('Final content:', finalContent); -} - -async function getCurrentLocation() { - return 'Boston'; // Simulate lookup -} - -async function getWeather(args: { location: string }) { - const { location } = args; - // … do lookup … - return { temperature, precipitation }; + const params: OpenAI.Chat.ChatCompletionCreateParams = { + messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + }; + const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); } main(); - -// {role: "user", content: "How's the weather this week?"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} -// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} -// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} -// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} -// -// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). - -Note that `runFunctions` was previously available as well, but has been deprecated in favor of `runTools`. - -Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
+Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. ## File uploads @@ -434,6 +190,7 @@ async function main() { .create({ model: 'gpt-4o', training_file: 'file-abc123' }) .catch(async (err) => { if (err instanceof OpenAI.APIError) { + console.log(err.request_id); console.log(err.status); // 400 console.log(err.name); // BadRequestError console.log(err.headers); // {server: 'nginx', ...} @@ -459,76 +216,6 @@ Error codes are as followed: | >=500 | `InternalServerError` | | N/A | `APIConnectionError` | -## Request IDs - -> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests) - -All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. - -```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 -``` - -You can also access the Request ID using the `.withResponse()` method: - -```ts -const { data: stream, request_id } = await openai.chat.completions - .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }) - .withResponse(); -``` - -## Microsoft Azure OpenAI - -To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` -class instead of the `OpenAI` class. - -> [!IMPORTANT] -> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params -> won't always be correct. 
- -```ts -import { AzureOpenAI } from 'openai'; -import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; - -const credential = new DefaultAzureCredential(); -const scope = 'https://cognitiveservices.azure.com/.default'; -const azureADTokenProvider = getBearerTokenProvider(credential, scope); - -const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); - -const result = await openai.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say hello!' }], -}); - -console.log(result.choices[0]!.message?.content); -``` - -### Realtime API -This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. - -To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: - -```ts -const cred = new DefaultAzureCredential(); -const scope = 'https://cognitiveservices.azure.com/.default'; -const deploymentName = 'gpt-4o-realtime-preview-1001'; -const azureADTokenProvider = getBearerTokenProvider(cred, scope); -const client = new AzureOpenAI({ - azureADTokenProvider, - apiVersion: '2024-10-01-preview', - deployment: deploymentName, -}); -const rt = await OpenAIRealtimeWS.azure(client); -``` - -Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. - ### Retries Certain errors will be automatically retried 2 times by default, with a short exponential backoff. @@ -571,6 +258,29 @@ On timeout, an `APIConnectionTimeoutError` is thrown. Note that requests which time out will be [retried twice by default](#retries). 
+## Request IDs + +> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests) + +All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. + +```ts +const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); +console.log(completion._request_id) // req_123 +``` + +You can also access the Request ID using the `.withResponse()` method: + +```ts +const { data: stream, request_id } = await openai.chat.completions + .create({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + stream: true, + }) + .withResponse(); +``` + ## Auto-pagination List methods in the OpenAI API are paginated. @@ -602,6 +312,55 @@ while (page.hasNextPage()) { } ``` +## Realtime API Beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +```ts +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); +``` + +For more information see [realtime.md](realtime.md). + +## Microsoft Azure OpenAI + +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. + +> [!IMPORTANT] +> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params +> won't always be correct. 
+ +```ts +import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; + +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); + +const result = await openai.chat.completions.create({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Say hello!' }], +}); + +console.log(result.choices[0]!.message?.content); +``` + +For more information on support for the Azure API, see [azure.md](azure.md). + +## Automated function calls + +We provide the `openai.beta.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, looping as long as the model requests tool calls. + +For more information see [helpers.md](helpers.md#automated-function-calls). + ## Advanced Usage ### Accessing raw Response data (e.g., headers) diff --git a/azure.md b/azure.md new file mode 100644 index 000000000..df06c2985 --- /dev/null +++ b/azure.md @@ -0,0 +1,49 @@ +# Microsoft Azure OpenAI + +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. + +> [!IMPORTANT] +> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params +> won't always be correct. 
+ +```ts +import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; + +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); + +const result = await openai.chat.completions.create({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Say hello!' }], +}); + +console.log(result.choices[0]!.message?.content); +``` + +For more information on support for the Azure API, see [azure.md](azure.md). + +## Realtime API + +This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. + +To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: + +```ts +const cred = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const deploymentName = 'gpt-4o-realtime-preview-1001'; +const azureADTokenProvider = getBearerTokenProvider(cred, scope); +const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, +}); +const rt = await OpenAIRealtimeWS.azure(client); +``` + +Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. 
diff --git a/helpers.md b/helpers.md index 16bc1f277..41b352e5e 100644 --- a/helpers.md +++ b/helpers.md @@ -142,9 +142,7 @@ More information can be found in the documentation: [Assistant Streaming](https: ```ts const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) + .stream(thread.id, { assistant_id: assistant.id }) .on('textCreated', (text) => process.stdout.write('\nassistant > ')) .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) @@ -304,47 +302,87 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts). -### Automated Function Calls +### Automated function calls -```ts -openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner -openai.chat.completions.runTools({ stream: true, … }, options?): ChatCompletionStreamingRunner -``` +We provide the `openai.beta.chat.completions.runTools({…})` +convenience helper for using function tool calls with the `/chat/completions` endpoint +which automatically call the JavaScript functions you provide +and sends their results back to the `/chat/completions` endpoint, +looping as long as the model requests tool calls. -`openai.chat.completions.runTools()` returns a Runner -for automating function calls with chat completions. -The runner automatically calls the JavaScript functions you provide and sends their results back -to the API, looping as long as the model requests function calls. +If you pass a `parse` function, it will automatically parse the `arguments` for you +and returns any parsing errors to the model to attempt auto-recovery. +Otherwise, the args will be passed to the function you provide as a string. 
-If you pass a `parse` function, it will automatically parse the `arguments` for you and returns any parsing -errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide -as a string. +If you pass `tool_choice: {function: {name: …}}` instead of `auto`, +it returns immediately after calling that function (and only loops to auto-recover parsing errors). ```ts -client.chat.completions.runTools({ - model: 'gpt-3.5-turbo', - messages: [{ role: 'user', content: 'How is the weather this week?' }], - tools: [ - { - type: 'function', - function: { - function: getWeather as (args: { location: string; time: Date }) => any, - parse: parseFunction as (args: strings) => { location: string; time: Date }, - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - time: { type: 'string', format: 'date-time' }, +import OpenAI from 'openai'; + +const client = new OpenAI(); + +async function main() { + const runner = client.beta.chat.completions + .runTools({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'How is the weather this week?' }], + tools: [ + { + type: 'function', + function: { + function: getCurrentLocation, + parameters: { type: 'object', properties: {} }, }, }, - }, - }, - ], -}); + { + type: 'function', + function: { + function: getWeather, + parse: JSON.parse, // or use a validation library like zod for typesafe parsing. 
+ parameters: { + type: 'object', + properties: { + location: { type: 'string' }, + }, + }, + }, + }, + ], + }) + .on('message', (message) => console.log(message)); + + const finalContent = await runner.finalContent(); + console.log(); + console.log('Final content:', finalContent); +} + +async function getCurrentLocation() { + return 'Boston'; // Simulate lookup +} + +async function getWeather(args: { location: string }) { + const { location } = args; + // … do lookup … + return { temperature, precipitation }; +} + +main(); + +// {role: "user", content: "How's the weather this week?"} +// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} +// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} +// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} +// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} +// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} +// +// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that -function (and only loops to auto-recover parsing errors). +Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). + +Read more about various examples such as with integrating with [zod](#integrate-with-zod), +[next.js](#integrate-with-nextjs), and [proxying a stream to the browser](#proxy-streaming-to-a-browser). By default, we run the loop up to 10 chat completions from the API. You can change this behavior by adjusting `maxChatCompletions` in the request options object. Note that `max_tokens` is the limit per @@ -662,3 +700,17 @@ client.beta.vectorStores.files.createAndPoll((...) 
client.beta.vectorStores.fileBatches.createAndPoll((...) client.beta.vectorStores.fileBatches.uploadAndPoll((...) ``` + +# Bulk Upload Helpers + +When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. +For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. + +```ts +const fileList = [ + createReadStream('/home/data/example.pdf'), + ... +]; + +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); +``` diff --git a/realtime.md b/realtime.md new file mode 100644 index 000000000..2fcd17e9e --- /dev/null +++ b/realtime.md @@ -0,0 +1,87 @@ +## Realtime API beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). + +This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). 
+
+Basic text based example with `ws`:
+
+```ts
+// requires `yarn add ws @types/ws`
+import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws';
+
+const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' });
+
+// access the underlying `ws.WebSocket` instance
+rt.socket.on('open', () => {
+  console.log('Connection opened!');
+  rt.send({
+    type: 'session.update',
+    session: {
+      modalities: ['text'],
+      model: 'gpt-4o-realtime-preview',
+    },
+  });
+
+  rt.send({
+    type: 'conversation.item.create',
+    item: {
+      type: 'message',
+      role: 'user',
+      content: [{ type: 'input_text', text: 'Say a couple paragraphs!' }],
+    },
+  });
+
+  rt.send({ type: 'response.create' });
+});
+
+rt.on('error', (err) => {
+  // in a real world scenario this should be logged somewhere as you
+  // likely want to continue processing events regardless of any errors
+  throw err;
+});
+
+rt.on('session.created', (event) => {
+  console.log('session created!', event.session);
+  console.log();
+});
+
+rt.on('response.text.delta', (event) => process.stdout.write(event.delta));
+rt.on('response.text.done', () => console.log());
+
+rt.on('response.done', () => rt.close());
+
+rt.socket.on('close', () => console.log('\nConnection closed!'));
+```
+
+To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access:
+
+```ts
+import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket';
+
+const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' });
+// ...
+rt.socket.addEventListener('open', () => {
+  // ...
+});
+```
+
+A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts).
+ +### Realtime error handling + +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. + +It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. + +```ts +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); +``` + From 23c194b4b927e50d0f5a78272e9ac50b181c53eb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 15:16:31 +0000 Subject: [PATCH 120/246] feat(pagination): avoid fetching when has_more: false (#1305) --- .stats.yml | 2 +- src/pagination.ts | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 8a5d2c06b..d59a86d22 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml diff --git a/src/pagination.ts b/src/pagination.ts index 63644e333..ad90a3a74 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -43,6 +43,8 @@ export class Page extends AbstractPage implements PageResponse export interface CursorPageResponse { data: Array; + + has_more: boolean; } export interface 
CursorPageParams { @@ -57,6 +59,8 @@ export class CursorPage { data: Array; + has_more: boolean; + constructor( client: APIClient, response: Response, @@ -66,12 +70,21 @@ export class CursorPage super(client, response, body, options); this.data = body.data || []; + this.has_more = body.has_more || false; } getPaginatedItems(): Item[] { return this.data ?? []; } + override hasNextPage() { + if (this.has_more === false) { + return false; + } + + return super.hasNextPage(); + } + // @deprecated Please use `nextPageInfo()` instead nextPageParams(): Partial | null { const info = this.nextPageInfo(); From 2d071dfd9e507e3a37177d1f96a5438ba9ac1268 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 10 Feb 2025 12:12:44 +0000 Subject: [PATCH 121/246] chore(internal): remove segfault-handler dependency --- ecosystem-tests/cli.ts | 4 ---- package.json | 1 - yarn.lock | 25 ------------------------- 3 files changed, 30 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 00120e5f9..4803b47c2 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -4,10 +4,6 @@ import yargs from 'yargs'; import assert from 'assert'; import path from 'path'; -// @ts-ignore -var SegfaultHandler = require('segfault-handler'); -SegfaultHandler.registerHandler('crash.log'); - const TAR_NAME = 'openai.tgz'; const PACK_FOLDER = '.pack'; const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`; diff --git a/package.json b/package.json index bd507e9f8..df2dcd2bc 100644 --- a/package.json +++ b/package.json @@ -47,7 +47,6 @@ "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", - "segfault-handler": "^1.3.0", "ts-jest": "^29.1.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", diff --git a/yarn.lock b/yarn.lock index 0a4307f70..ad5fb7630 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1176,13 +1176,6 @@ big-integer@^1.6.44: resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" integrity 
sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg== -bindings@^1.2.1: - version "1.5.0" - resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" - integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== - dependencies: - file-uri-to-path "1.0.0" - bplist-parser@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.2.0.tgz#43a9d183e5bf9d545200ceac3e712f79ebbe8d0e" @@ -1760,11 +1753,6 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -file-uri-to-path@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" - integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== - fill-range@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" @@ -2706,11 +2694,6 @@ ms@^2.0.0, ms@^2.1.3: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -nan@^2.14.0: - version "2.22.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.22.0.tgz#31bc433fc33213c97bad36404bb68063de604de3" - integrity sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw== - natural-compare@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" @@ -3061,14 +3044,6 @@ safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity 
sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== -segfault-handler@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/segfault-handler/-/segfault-handler-1.3.0.tgz#054bc847832fa14f218ba6a79e42877501c8870e" - integrity sha512-p7kVHo+4uoYkr0jmIiTBthwV5L2qmWtben/KDunDZ834mbos+tY+iO0//HpAJpOFSQZZ+wxKWuRo4DxV02B7Lg== - dependencies: - bindings "^1.2.1" - nan "^2.14.0" - semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" From 85ff876a75147490e60c70c2f36e964513f1086a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 20:06:34 +0000 Subject: [PATCH 122/246] fix: correctly decode multi-byte characters over multiple chunks (#1316) --- src/internal/decoders/line.ts | 107 ++++++++++++++++++++++------------ src/streaming.ts | 6 +- tests/streaming.test.ts | 53 ++++++++++++++++- 3 files changed, 126 insertions(+), 40 deletions(-) diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 34e41d1dc..66f62c057 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -13,52 +13,58 @@ export class LineDecoder { static NEWLINE_CHARS = new Set(['\n', '\r']); static NEWLINE_REGEXP = /\r\n|[\n\r]/g; - buffer: string[]; - trailingCR: boolean; + buffer: Uint8Array; + #carriageReturnIndex: number | null; textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. 
constructor() { - this.buffer = []; - this.trailingCR = false; + this.buffer = new Uint8Array(); + this.#carriageReturnIndex = null; } decode(chunk: Bytes): string[] { - let text = this.decodeText(chunk); - - if (this.trailingCR) { - text = '\r' + text; - this.trailingCR = false; - } - if (text.endsWith('\r')) { - this.trailingCR = true; - text = text.slice(0, -1); - } - - if (!text) { + if (chunk == null) { return []; } - const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); - let lines = text.split(LineDecoder.NEWLINE_REGEXP); + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(this.buffer.length + binaryChunk.length); + newData.set(this.buffer); + newData.set(binaryChunk, this.buffer.length); + this.buffer = newData; + + const lines: string[] = []; + let patternIndex; + while ((patternIndex = findNewlineIndex(this.buffer, this.#carriageReturnIndex)) != null) { + if (patternIndex.carriage && this.#carriageReturnIndex == null) { + // skip until we either get a corresponding `\n`, a new `\r` or nothing + this.#carriageReturnIndex = patternIndex.index; + continue; + } - // if there is a trailing new line then the last entry will be an empty - // string which we don't care about - if (trailingNewline) { - lines.pop(); - } + // we got double \r or \rtext\n + if ( + this.#carriageReturnIndex != null && + (patternIndex.index !== this.#carriageReturnIndex + 1 || patternIndex.carriage) + ) { + lines.push(this.decodeText(this.buffer.slice(0, this.#carriageReturnIndex - 1))); + this.buffer = this.buffer.slice(this.#carriageReturnIndex); + this.#carriageReturnIndex = null; + continue; + } - if (lines.length === 1 && !trailingNewline) { - this.buffer.push(lines[0]!); - return []; - } + const endIndex = + this.#carriageReturnIndex !== null ? 
patternIndex.preceding - 1 : patternIndex.preceding; - if (this.buffer.length > 0) { - lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; - this.buffer = []; - } + const line = this.decodeText(this.buffer.slice(0, endIndex)); + lines.push(line); - if (!trailingNewline) { - this.buffer = [lines.pop() || '']; + this.buffer = this.buffer.slice(patternIndex.index); + this.#carriageReturnIndex = null; } return lines; @@ -102,13 +108,38 @@ export class LineDecoder { } flush(): string[] { - if (!this.buffer.length && !this.trailingCR) { + if (!this.buffer.length) { return []; } + return this.decode('\n'); + } +} - const lines = [this.buffer.join('')]; - this.buffer = []; - this.trailingCR = false; - return lines; +/** + * This function searches the buffer for the end patterns, (\r or \n) + * and returns an object with the index preceding the matched newline and the + * index after the newline char. `null` is returned if no new line is found. + * + * ```ts + * findNewLineIndex('abc\ndef') -> { preceding: 2, index: 3 } + * ``` + */ +function findNewlineIndex( + buffer: Uint8Array, + startIndex: number | null, +): { preceding: number; index: number; carriage: boolean } | null { + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = startIndex ?? 
0; i < buffer.length; i++) { + if (buffer[i] === newline) { + return { preceding: i, index: i + 1, carriage: false }; + } + + if (buffer[i] === carriage) { + return { preceding: i, index: i + 1, carriage: true }; + } } + + return null; } diff --git a/src/streaming.ts b/src/streaming.ts index 6a57a50a0..1d1ae344b 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -346,13 +346,17 @@ class SSEDecoder { } /** This is an internal helper function that's just used for testing */ -export function _decodeChunks(chunks: string[]): string[] { +export function _decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { const decoder = new LineDecoder(); const lines: string[] = []; for (const chunk of chunks) { lines.push(...decoder.decode(chunk)); } + if (flush) { + lines.push(...decoder.flush()); + } + return lines; } diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 6fe9a5781..8e5d0ca31 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -2,6 +2,7 @@ import { Response } from 'node-fetch'; import { PassThrough } from 'stream'; import assert from 'assert'; import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming'; +import { LineDecoder } from 'openai/internal/decoders/line'; describe('line decoder', () => { test('basic', () => { @@ -10,8 +11,8 @@ describe('line decoder', () => { }); test('basic with \\r', () => { - // baz is not included because the line hasn't ended yet expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); }); test('trailing new lines', () => { @@ -29,6 +30,56 @@ describe('line decoder', () => { test('escaped new lines with \\r', () => { expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 
'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + }); + + test('double newline', () => { + expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('multi-byte characters across chunks', () => { + const decoder = new LineDecoder(); + + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); + expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); + expect( + decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), + ).toHaveLength(0); + + const decoded = decoder.decode(new Uint8Array([0xa])); + expect(decoded).toEqual(['известни']); + }); + + test('flushing trailing newlines', () => { + expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('flushing empty buffer', () => { + 
expect(decodeChunks([], { flush: true })).toEqual([]); + }); }); describe('streaming decoding', () => { From 5e5a38a3f5bd45e74eb624fe85664294247bf580 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Feb 2025 11:19:35 +0000 Subject: [PATCH 123/246] fix(assistants): handle `thread.run.incomplete` event --- src/lib/AssistantStream.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index caf68e7dd..9b6cc20c5 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -370,6 +370,7 @@ export class AssistantStream case 'thread.run.in_progress': case 'thread.run.requires_action': case 'thread.run.completed': + case 'thread.run.incomplete': case 'thread.run.failed': case 'thread.run.cancelling': case 'thread.run.cancelled': @@ -400,6 +401,8 @@ export class AssistantStream throw new Error( 'Encountered an error event in event processing - errors should be processed earlier', ); + default: + assertNever(event); } } @@ -772,3 +775,5 @@ export class AssistantStream return await this._createToolAssistantStream(runs, threadId, runId, params, options); } } + +function assertNever(_x: never) {} From 0ea723831b52ed22cadfc997ddb45a758e2247db Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 05:07:11 +0000 Subject: [PATCH 124/246] release: 4.84.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 25 +++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6eb0f130e..063dfb8fd 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.83.0" + ".": "4.84.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f61def5e4..d18ddf815 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## 4.84.0 
(2025-02-12) + +Full Changelog: [v4.83.0...v4.84.0](https://github.com/openai/openai-node/compare/v4.83.0...v4.84.0) + +### Features + +* **pagination:** avoid fetching when has_more: false ([#1305](https://github.com/openai/openai-node/issues/1305)) ([b6944c6](https://github.com/openai/openai-node/commit/b6944c634b53c9084f2ccf777c2491e89b2cc7af)) + + +### Bug Fixes + +* **api:** add missing reasoning effort + model enums ([#1302](https://github.com/openai/openai-node/issues/1302)) ([14c55c3](https://github.com/openai/openai-node/commit/14c55c312e31f1ed46d02f39a99049f785504a53)) +* **assistants:** handle `thread.run.incomplete` event ([7032cc4](https://github.com/openai/openai-node/commit/7032cc40b8aa0a58459cf114bceb8028a8517400)) +* correctly decode multi-byte characters over multiple chunks ([#1316](https://github.com/openai/openai-node/issues/1316)) ([dd776c4](https://github.com/openai/openai-node/commit/dd776c4867401f527f699bd4b9e567890256e849)) + + +### Chores + +* **internal:** remove segfault-handler dependency ([3521ca3](https://github.com/openai/openai-node/commit/3521ca34e7f5bd51542084e27c084a5d7cc5448b)) + + +### Documentation + +* **readme:** cleanup into multiple files ([da94424](https://github.com/openai/openai-node/commit/da944242e542e9e5e51cb11853c621fc6825ac02)) + ## 4.83.0 (2025-02-05) Full Changelog: [v4.82.0...v4.83.0](https://github.com/openai/openai-node/compare/v4.82.0...v4.83.0) diff --git a/jsr.json b/jsr.json index 6fa05e624..47c478074 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.83.0", + "version": "4.84.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index df2dcd2bc..96e9b048f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.83.0", + "version": "4.84.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff 
--git a/src/version.ts b/src/version.ts index 13c764d7d..b67556e78 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.83.0'; // x-release-please-version +export const VERSION = '4.84.0'; // x-release-please-version From 0e1981a128b4db5db657f22a54b711420ebbdb32 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 12 Feb 2025 16:01:16 +0000 Subject: [PATCH 125/246] fix(realtime): correct websocket type var constraint (#1321) --- src/beta/realtime/websocket.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index e8143fdbf..b10a2519d 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -11,7 +11,7 @@ interface MessageEvent { type _WebSocket = typeof globalThis extends ( { - WebSocket: infer ws; + WebSocket: infer ws extends abstract new (...args: any) => any; } ) ? // @ts-ignore From c91ebef762fd55a553e15d7e4a1908243ea3e007 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 05:07:08 +0000 Subject: [PATCH 126/246] release: 4.84.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 063dfb8fd..023314f41 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.84.0" + ".": "4.84.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d18ddf815..444430307 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.84.1 (2025-02-13) + +Full Changelog: [v4.84.0...v4.84.1](https://github.com/openai/openai-node/compare/v4.84.0...v4.84.1) + +### Bug Fixes + +* **realtime:** correct websocket type var constraint ([#1321](https://github.com/openai/openai-node/issues/1321)) 
([afb17ea](https://github.com/openai/openai-node/commit/afb17ea6497b860ebbe5d8e68e4a97681dd307ff)) + ## 4.84.0 (2025-02-12) Full Changelog: [v4.83.0...v4.84.0](https://github.com/openai/openai-node/compare/v4.83.0...v4.84.0) diff --git a/jsr.json b/jsr.json index 47c478074..3148d6fca 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.84.0", + "version": "4.84.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 96e9b048f..4686e3a97 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.84.0", + "version": "4.84.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index b67556e78..767424b0e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.84.0'; // x-release-please-version +export const VERSION = '4.84.1'; // x-release-please-version From 6e9444c6c77a93ff4ce06bd5b27a9c236ba6f307 Mon Sep 17 00:00:00 2001 From: Jamon Holmgren Date: Thu, 13 Feb 2025 05:27:35 -0800 Subject: [PATCH 127/246] fix(realtime): call .toString() on WebSocket url (#1324) The [WebSocket spec at WHATWG](https://websockets.spec.whatwg.org/#ref-for-dom-websocket-websocket%E2%91%A0) indicates that the `url` parameter of the WebSocket constructor is a string. Some implementations (like Chrome) will accept a URL object, but calling .toString() should work for all cases. Fixes #1323. 
--- src/beta/realtime/websocket.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index b10a2519d..e8900e809 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -53,7 +53,7 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { props.onURL?.(this.url); // @ts-ignore - this.socket = new WebSocket(this.url, [ + this.socket = new WebSocket(this.url.toString(), [ 'realtime', ...(isAzure(client) ? [] : [`openai-insecure-api-key.${client.apiKey}`]), 'openai-beta.realtime-v1', From be1ca6b9a6732214ac21ca375b5b0a9b7f492fd6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:41:49 +0000 Subject: [PATCH 128/246] feat(api): add support for storing chat completions (#1327) --- .stats.yml | 4 +- api.md | 72 ++++---- src/index.ts | 27 ++- src/lib/ChatCompletionStream.ts | 2 +- src/resources/chat/chat.ts | 19 +- .../chat/{ => completions}/completions.ts | 170 ++++++++++++++++-- src/resources/chat/completions/index.ts | 49 +++++ src/resources/chat/completions/messages.ts | 52 ++++++ src/resources/chat/index.ts | 10 +- src/resources/completions.ts | 4 +- src/resources/moderations.ts | 4 +- tests/api-resources/chat/completions.test.ts | 65 ------- .../chat/completions/completions.test.ts | 144 +++++++++++++++ .../chat/completions/messages.test.ts | 40 +++++ 14 files changed, 534 insertions(+), 128 deletions(-) rename src/resources/chat/{ => completions}/completions.ts (88%) create mode 100644 src/resources/chat/completions/index.ts create mode 100644 src/resources/chat/completions/messages.ts delete mode 100644 tests/api-resources/chat/completions.test.ts create mode 100644 tests/api-resources/chat/completions/completions.test.ts create mode 100644 tests/api-resources/chat/completions/messages.test.ts diff --git a/.stats.yml b/.stats.yml index d59a86d22..658877d3b 100644 
--- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml +configured_endpoints: 74 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml diff --git a/api.md b/api.md index 01854a8e0..63f239628 100644 --- a/api.md +++ b/api.md @@ -32,39 +32,51 @@ Types: Types: -- ChatCompletion -- ChatCompletionAssistantMessageParam -- ChatCompletionAudio -- ChatCompletionAudioParam -- ChatCompletionChunk -- ChatCompletionContentPart -- ChatCompletionContentPartImage -- ChatCompletionContentPartInputAudio -- ChatCompletionContentPartRefusal -- ChatCompletionContentPartText -- ChatCompletionDeveloperMessageParam -- ChatCompletionFunctionCallOption -- ChatCompletionFunctionMessageParam -- ChatCompletionMessage -- ChatCompletionMessageParam -- ChatCompletionMessageToolCall -- ChatCompletionModality -- ChatCompletionNamedToolChoice -- ChatCompletionPredictionContent -- ChatCompletionReasoningEffort -- ChatCompletionRole -- ChatCompletionStreamOptions -- ChatCompletionSystemMessageParam -- ChatCompletionTokenLogprob -- ChatCompletionTool -- ChatCompletionToolChoiceOption -- ChatCompletionToolMessageParam -- ChatCompletionUserMessageParam -- CreateChatCompletionRequestMessage +- ChatCompletion +- ChatCompletionAssistantMessageParam +- ChatCompletionAudio +- ChatCompletionAudioParam +- ChatCompletionChunk +- ChatCompletionContentPart +- ChatCompletionContentPartImage +- ChatCompletionContentPartInputAudio +- ChatCompletionContentPartRefusal +- ChatCompletionContentPartText +- ChatCompletionDeleted +- ChatCompletionDeveloperMessageParam +- ChatCompletionFunctionCallOption +- ChatCompletionFunctionMessageParam +- ChatCompletionMessage +- ChatCompletionMessageParam +- ChatCompletionMessageToolCall +- ChatCompletionModality 
+- ChatCompletionNamedToolChoice +- ChatCompletionPredictionContent +- ChatCompletionReasoningEffort +- ChatCompletionRole +- ChatCompletionStoreMessage +- ChatCompletionStreamOptions +- ChatCompletionSystemMessageParam +- ChatCompletionTokenLogprob +- ChatCompletionTool +- ChatCompletionToolChoiceOption +- ChatCompletionToolMessageParam +- ChatCompletionUserMessageParam +- CreateChatCompletionRequestMessage Methods: -- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.retrieve(completionId) -> ChatCompletion +- client.chat.completions.update(completionId, { ...params }) -> ChatCompletion +- client.chat.completions.list({ ...params }) -> ChatCompletionsPage +- client.chat.completions.del(completionId) -> ChatCompletionDeleted + +### Messages + +Methods: + +- client.chat.completions.messages.list(completionId, { ...params }) -> ChatCompletionStoreMessagesPage # Embeddings diff --git a/src/index.ts b/src/index.ts index f4e940af8..debefce8c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,13 @@ import { import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; import { Chat, ChatModel } from './resources/chat/chat'; +import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { + Upload, + UploadCompleteParams, + UploadCreateParams, + Uploads as UploadsAPIUploads, +} from './resources/uploads/uploads'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -80,9 +87,11 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, + ChatCompletionListParams, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, @@ -91,21 +100,17 @@ import { 
ChatCompletionPredictionContent, ChatCompletionReasoningEffort, ChatCompletionRole, + ChatCompletionStoreMessage, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, ChatCompletionUserMessageParam, -} from './resources/chat/completions'; -import { FineTuning } from './resources/fine-tuning/fine-tuning'; -import { - Upload, - UploadCompleteParams, - UploadCreateParams, - Uploads as UploadsAPIUploads, -} from './resources/uploads/uploads'; + ChatCompletionsPage, +} from './resources/chat/completions/completions'; export interface ClientOptions { /** @@ -310,6 +315,7 @@ export class OpenAI extends Core.APIClient { OpenAI.Completions = Completions; OpenAI.Chat = Chat; +OpenAI.ChatCompletionsPage = ChatCompletionsPage; OpenAI.Embeddings = Embeddings; OpenAI.Files = Files; OpenAI.FileObjectsPage = FileObjectsPage; @@ -355,6 +361,7 @@ export declare namespace OpenAI { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -366,6 +373,7 @@ export declare namespace OpenAI { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type 
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -373,9 +381,12 @@ export declare namespace OpenAI { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, }; export { diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index 6c846f70b..35648c27b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -13,7 +13,7 @@ import { type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, type ChatCompletionRole, -} from '../resources/chat/completions'; +} from '../resources/chat/completions/completions'; import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents, diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index d4a18929c..5bceec45a 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; -import * as CompletionsAPI from './completions'; +import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -16,9 +16,11 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, + ChatCompletionListParams, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, @@ -27,19 +29,24 @@ import { ChatCompletionPredictionContent, ChatCompletionReasoningEffort, ChatCompletionRole, + ChatCompletionStoreMessage, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, ChatCompletionUserMessageParam, + ChatCompletionsPage, CompletionCreateParams, CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming, + CompletionListParams, + CompletionUpdateParams, Completions, CreateChatCompletionRequestMessage, -} from './completions'; +} from './completions/completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); @@ -87,6 +94,7 @@ export type ChatModel = | 'gpt-3.5-turbo-16k-0613'; Chat.Completions = Completions; +Chat.ChatCompletionsPage = ChatCompletionsPage; export declare namespace Chat { export { type ChatModel as ChatModel }; @@ -103,6 +111,7 @@ export declare namespace Chat { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as 
ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -114,6 +123,7 @@ export declare namespace Chat { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -122,11 +132,16 @@ export declare namespace Chat { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, }; } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions/completions.ts similarity index 88% rename from src/resources/chat/completions.ts rename to src/resources/chat/completions/completions.ts index 2586845c3..3af4a3a1d 100644 --- a/src/resources/chat/completions.ts +++ 
b/src/resources/chat/completions/completions.ts @@ -1,15 +1,21 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { APIPromise } from '../../core'; -import * as Core from '../../core'; -import * as ChatCompletionsAPI from './completions'; -import * as CompletionsAPI from '../completions'; -import * as Shared from '../shared'; -import * as ChatAPI from './chat'; -import { Stream } from '../../streaming'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { APIPromise } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsCompletionsAPI from './completions'; +import * as CompletionsAPI from '../../completions'; +import * as Shared from '../../shared'; +import * as ChatAPI from '../chat'; +import * as MessagesAPI from './messages'; +import { MessageListParams, Messages } from './messages'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { Stream } from '../../../streaming'; export class Completions extends APIResource { + messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); + /** * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), @@ -42,8 +48,60 @@ export class Completions extends APIResource { | APIPromise | APIPromise>; } + + /** + * Get a stored chat completion. Only chat completions that have been created with + * the `store` parameter set to `true` will be returned. + */ + retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/chat/completions/${completionId}`, options); + } + + /** + * Modify a stored chat completion. Only chat completions that have been created + * with the `store` parameter set to `true` can be modified. 
Currently, the only + * supported modification is to update the `metadata` field. + */ + update( + completionId: string, + body: ChatCompletionUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/chat/completions/${completionId}`, { body, ...options }); + } + + /** + * List stored chat completions. Only chat completions that have been stored with + * the `store` parameter set to `true` will be returned. + */ + list( + query?: ChatCompletionListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: ChatCompletionListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/chat/completions', ChatCompletionsPage, { query, ...options }); + } + + /** + * Delete a stored chat completion. Only chat completions that have been created + * with the `store` parameter set to `true` can be deleted. + */ + del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/chat/completions/${completionId}`, options); + } } +export class ChatCompletionsPage extends CursorPage {} + +export class ChatCompletionStoreMessagesPage extends CursorPage {} + /** * Represents a chat completion response returned by model, based on the provided * input. @@ -119,7 +177,7 @@ export namespace ChatCompletion { /** * A chat completion message generated by the model. */ - message: ChatCompletionsAPI.ChatCompletionMessage; + message: CompletionsCompletionsAPI.ChatCompletionMessage; } export namespace Choice { @@ -130,12 +188,12 @@ export namespace ChatCompletion { /** * A list of message content tokens with log probability information. */ - content: Array | null; + content: Array | null; /** * A list of message refusal tokens with log probability information. 
*/ - refusal: Array | null; + refusal: Array | null; } } } @@ -437,12 +495,12 @@ export namespace ChatCompletionChunk { /** * A list of message content tokens with log probability information. */ - content: Array | null; + content: Array | null; /** * A list of message refusal tokens with log probability information. */ - refusal: Array | null; + refusal: Array | null; } } } @@ -537,6 +595,23 @@ export interface ChatCompletionContentPartText { type: 'text'; } +export interface ChatCompletionDeleted { + /** + * The ID of the chat completion that was deleted. + */ + id: string; + + /** + * Whether the chat completion was deleted. + */ + deleted: boolean; + + /** + * The type of object being deleted. + */ + object: 'chat.completion.deleted'; +} + /** * Developer-provided instructions that the model should follow, regardless of * messages sent by the user. With o1 models and newer, `developer` messages @@ -758,6 +833,16 @@ export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; */ export type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function'; +/** + * A chat completion message generated by the model. + */ +export interface ChatCompletionStoreMessage extends ChatCompletionMessage { + /** + * The identifier of the chat message. + */ + id: string; +} + /** * Options for streaming response. Only set this when you set `stream: true`. 
*/ @@ -1229,8 +1314,9 @@ export namespace ChatCompletionCreateParams { } export type ChatCompletionCreateParamsNonStreaming = - ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; + CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; + export type ChatCompletionCreateParamsStreaming = + CompletionsCompletionsAPI.ChatCompletionCreateParamsStreaming; } /** @@ -1272,6 +1358,51 @@ export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreat */ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; +export interface ChatCompletionUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; +} + +/** + * @deprecated Use ChatCompletionUpdateParams instead + */ +export type CompletionUpdateParams = ChatCompletionUpdateParams; + +export interface ChatCompletionListParams extends CursorPageParams { + /** + * A list of metadata keys to filter the chat completions by. Example: + * + * `metadata[key1]=value1&metadata[key2]=value2` + */ + metadata?: Shared.Metadata | null; + + /** + * The model used to generate the chat completions. + */ + model?: string; + + /** + * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * `desc` for descending order. Defaults to `asc`. 
+ */ + order?: 'asc' | 'desc'; +} + +/** + * @deprecated Use ChatCompletionListParams instead + */ +export type CompletionListParams = ChatCompletionListParams; + +Completions.ChatCompletionsPage = ChatCompletionsPage; +Completions.Messages = Messages; + export declare namespace Completions { export { type ChatCompletion as ChatCompletion, @@ -1284,6 +1415,7 @@ export declare namespace Completions { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -1295,6 +1427,7 @@ export declare namespace Completions { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -1303,11 +1436,18 @@ export declare namespace Completions { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type 
CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, }; + + export { Messages as Messages, type MessageListParams as MessageListParams }; } diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts new file mode 100644 index 000000000..3691f41d8 --- /dev/null +++ b/src/resources/chat/completions/index.ts @@ -0,0 +1,49 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, + Completions, + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort, + type ChatCompletionRole, + type ChatCompletionStoreMessage, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type 
ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions'; +export { Messages, type MessageListParams } from './messages'; diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts new file mode 100644 index 000000000..fc1cc5d94 --- /dev/null +++ b/src/resources/chat/completions/messages.ts @@ -0,0 +1,52 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsAPI from './completions'; +import { ChatCompletionStoreMessagesPage } from './completions'; +import { type CursorPageParams } from '../../../pagination'; + +export class Messages extends APIResource { + /** + * Get the messages in a stored chat completion. Only chat completions that have + * been created with the `store` parameter set to `true` will be returned. 
+ */ + list( + completionId: string, + query?: MessageListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + completionId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + completionId: string, + query: MessageListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(completionId, {}, query); + } + return this._client.getAPIList( + `/chat/completions/${completionId}/messages`, + ChatCompletionStoreMessagesPage, + { query, ...options }, + ); + } +} + +export interface MessageListParams extends CursorPageParams { + /** + * Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + * for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace Messages { + export { type MessageListParams as MessageListParams }; +} + +export { ChatCompletionStoreMessagesPage }; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index c3be19402..a9b5b46fb 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -2,6 +2,8 @@ export { Chat, type ChatModel } from './chat'; export { + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, Completions, type ChatCompletion, type ChatCompletionAssistantMessageParam, @@ -13,6 +15,7 @@ export { type ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal, type ChatCompletionContentPartText, + type ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam, @@ -24,6 +27,7 @@ export { type ChatCompletionPredictionContent, type ChatCompletionReasoningEffort, type ChatCompletionRole, + type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob, @@ -38,4 +42,8 @@ export { type CompletionCreateParamsNonStreaming, type 
ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming, -} from './completions'; + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions/index'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index be75a46f0..664e39d9d 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -4,7 +4,7 @@ import { APIResource } from '../resource'; import { APIPromise } from '../core'; import * as Core from '../core'; import * as CompletionsAPI from './completions'; -import * as ChatCompletionsAPI from './chat/completions'; +import * as CompletionsCompletionsAPI from './chat/completions/completions'; import { Stream } from '../streaming'; export class Completions extends APIResource { @@ -311,7 +311,7 @@ export interface CompletionCreateParamsBase { /** * Options for streaming response. Only set this when you set `stream: true`. */ - stream_options?: ChatCompletionsAPI.ChatCompletionStreamOptions | null; + stream_options?: CompletionsCompletionsAPI.ChatCompletionStreamOptions | null; /** * The suffix that comes after a completion of inserted text. diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index f7b16166d..86e90376d 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -75,14 +75,14 @@ export namespace Moderation { * execution of wrongdoing, or that gives advice or instruction on how to commit * illicit acts. For example, "how to shoplift" would fit this category. */ - illicit: boolean; + illicit: boolean | null; /** * Content that includes instructions or advice that facilitate the planning or * execution of wrongdoing that also includes violence, or that gives advice or * instruction on the procurement of any weapon. 
*/ - 'illicit/violent': boolean; + 'illicit/violent': boolean | null; /** * Content that promotes, encourages, or depicts acts of self-harm, such as diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts deleted file mode 100644 index 8f1bc7d4c..000000000 --- a/tests/api-resources/chat/completions.test.ts +++ /dev/null @@ -1,65 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import OpenAI from 'openai'; -import { Response } from 'node-fetch'; - -const client = new OpenAI({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', -}); - -describe('resource completions', () => { - test('create: only required params', async () => { - const responsePromise = client.chat.completions.create({ - messages: [{ content: 'string', role: 'developer' }], - model: 'gpt-4o', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('create: required and optional params', async () => { - const response = await client.chat.completions.create({ - messages: [{ content: 'string', role: 'developer', name: 'name' }], - model: 'gpt-4o', - audio: { format: 'wav', voice: 'alloy' }, - frequency_penalty: -2, - function_call: 'none', - functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], - logit_bias: { foo: 0 }, - logprobs: true, - max_completion_tokens: 0, - max_tokens: 0, - metadata: { foo: 'string' }, - modalities: ['text'], - n: 1, - parallel_tool_calls: true, - prediction: { content: 'string', type: 'content' }, - presence_penalty: -2, - reasoning_effort: 'low', - response_format: { 
type: 'text' }, - seed: 0, - service_tier: 'auto', - stop: 'string', - store: true, - stream: false, - stream_options: { include_usage: true }, - temperature: 1, - tool_choice: 'none', - tools: [ - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - ], - top_logprobs: 0, - top_p: 1, - user: 'user-1234', - }); - }); -}); diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts new file mode 100644 index 000000000..acdd631db --- /dev/null +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -0,0 +1,144 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource completions', () => { + test('create: only required params', async () => { + const responsePromise = client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer' }], + model: 'gpt-4o', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer', name: 'name' }], + model: 'gpt-4o', + audio: { format: 'wav', voice: 'alloy' }, + frequency_penalty: -2, + function_call: 'none', + functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], + logit_bias: { foo: 0 }, + 
logprobs: true, + max_completion_tokens: 0, + max_tokens: 0, + metadata: { foo: 'string' }, + modalities: ['text'], + n: 1, + parallel_tool_calls: true, + prediction: { content: 'string', type: 'content' }, + presence_penalty: -2, + reasoning_effort: 'low', + response_format: { type: 'text' }, + seed: 0, + service_tier: 'auto', + stop: 'string', + store: true, + stream: false, + stream_options: { include_usage: true }, + temperature: 1, + tool_choice: 'none', + tools: [ + { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, + type: 'function', + }, + ], + top_logprobs: 0, + top_p: 1, + user: 'user-1234', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.chat.completions.retrieve('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.retrieve('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + }); + + test('list', async () => { + const responsePromise = client.chat.completions.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.chat.completions.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.list( + { after: 'after', limit: 0, metadata: { foo: 'string' }, model: 'model', order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.chat.completions.del('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed 
correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.del('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/chat/completions/messages.test.ts b/tests/api-resources/chat/completions/messages.test.ts new file mode 100644 index 000000000..664106cb9 --- /dev/null +++ b/tests/api-resources/chat/completions/messages.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource messages', () => { + test('list', async () => { + const responsePromise = client.chat.completions.messages.list('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.messages.list('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + 
client.chat.completions.messages.list( + 'completion_id', + { after: 'after', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); From f9897464738ddd6c3207be3530b03db7e522e52e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:53:06 +0000 Subject: [PATCH 129/246] release: 4.85.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 023314f41..f48cc7f57 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.84.1" + ".": "4.85.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 444430307..290b2414d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.85.0 (2025-02-13) + +Full Changelog: [v4.84.1...v4.85.0](https://github.com/openai/openai-node/compare/v4.84.1...v4.85.0) + +### Features + +* **api:** add support for storing chat completions ([#1327](https://github.com/openai/openai-node/issues/1327)) ([8d77f8e](https://github.com/openai/openai-node/commit/8d77f8e3c4801b7fa1e7c6f50b48c1de1f43f3e6)) + + +### Bug Fixes + +* **realtime:** call .toString() on WebSocket url ([#1324](https://github.com/openai/openai-node/issues/1324)) ([09bc50d](https://github.com/openai/openai-node/commit/09bc50d439679b6acfd2441e69ee5aa18c00e5d9)) + ## 4.84.1 (2025-02-13) Full Changelog: [v4.84.0...v4.84.1](https://github.com/openai/openai-node/compare/v4.84.0...v4.84.1) diff --git a/jsr.json b/jsr.json index 3148d6fca..368f86c0b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.84.1", + "version": "4.85.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json 
b/package.json index 4686e3a97..dc61af02c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.84.1", + "version": "4.85.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 767424b0e..6483fa72b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.84.1'; // x-release-please-version +export const VERSION = '4.85.0'; // x-release-please-version From 26d5868dd53045bc820a607100eab1070785f50c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 22:37:36 +0000 Subject: [PATCH 130/246] fix(client): fix export map for index exports (#1328) --- package.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index dc61af02c..46f58814d 100644 --- a/package.json +++ b/package.json @@ -112,17 +112,17 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": "./dist/*.d.ts", - "default": "./dist/*.mjs" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "default": ["./dist/*.mjs", "./dist/*/index.mjs"] }, "./*.js": { - "types": "./dist/*.d.ts", - "default": "./dist/*.js" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "default": ["./dist/*.js", "./dist/*/index.js"] }, "./*": { - "types": "./dist/*.d.ts", - "require": "./dist/*.js", - "default": "./dist/*.mjs" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "require": ["./dist/*.js", "./dist/*/index.js"], + "default": ["./dist/*.mjs", "./dist/*/index.mjs"] } }, "bin": "./bin/cli", From 1f38cc1976f4091a90a38d49e6ddc1c22e5c39ab Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:19:22 +0000 Subject: [PATCH 131/246] fix(package): add chat/completions.ts back in (#1333) --- src/resources/chat/completions.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 
src/resources/chat/completions.ts diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts new file mode 100644 index 000000000..55b151e8b --- /dev/null +++ b/src/resources/chat/completions.ts @@ -0,0 +1 @@ +export * from './completions/completions'; From 13aab101588c2eee1250d7c50b2abfeca1c5fa3d Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:30:35 +0000 Subject: [PATCH 132/246] chore(internal): add missing return type annotation (#1334) --- src/pagination.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pagination.ts b/src/pagination.ts index ad90a3a74..7a513fc44 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -77,7 +77,7 @@ export class CursorPage return this.data ?? []; } - override hasNextPage() { + override hasNextPage(): boolean { if (this.has_more === false) { return false; } From b9460fbc7ca9639df91c0b7184eea9c7631ae313 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:28:44 +0000 Subject: [PATCH 133/246] CI: add ecosystem tests (#1332) --- .github/workflows/ci.yml | 33 +++++++++++++++++++++++++++++++++ .gitignore | 2 +- ecosystem-tests/cli.ts | 1 + 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6798e38a..85d792c44 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,3 +64,36 @@ jobs: - name: Run tests run: ./scripts/test + + ecosystem_tests: + name: ecosystem tests (v${{ matrix.node-version }}) + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-node' + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + node-version: ['20'] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' + + - uses: denoland/setup-deno@v1 + with: + deno-version: v1.39.0 + + - uses: oven-sh/setup-bun@v2 + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run 
ecosystem tests + run: | + yarn tsn ecosystem-tests/cli.ts --live --verbose --parallel --jobs=4 --retry=3 + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/.gitignore b/.gitignore index 81c4c41ca..3fdab1cb7 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,4 @@ tmp .pack ecosystem-tests/deno/package.json ecosystem-tests/*/openai.tgz - +.dev.vars diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 4803b47c2..77faddec5 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -70,6 +70,7 @@ const projectRunners = { 'cloudflare-worker': async () => { await installPackage(); + await fs.writeFile('.dev.vars', `OPENAI_API_KEY='${process.env['OPENAI_API_KEY']}'`); await run('npm', ['run', 'tsc']); if (state.live) { From 212710db8c8c139392c6532c0eccfd13558ef2d4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 10:31:06 +0000 Subject: [PATCH 134/246] release: 4.85.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 +++++++++++++ jsr.json | 2 +- package.json | 37 +++++++++++++++++++++++++++-------- src/version.ts | 2 +- 5 files changed, 46 insertions(+), 11 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f48cc7f57..89f1ce153 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.0" + ".": "4.85.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 290b2414d..9850ac460 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.85.1 (2025-02-14) + +Full Changelog: [v4.85.0...v4.85.1](https://github.com/openai/openai-node/compare/v4.85.0...v4.85.1) + +### Bug Fixes + +* **client:** fix export map for index exports ([#1328](https://github.com/openai/openai-node/issues/1328)) ([647ba7a](https://github.com/openai/openai-node/commit/647ba7a52311928f604c72b2cc95698c0837887f)) +* **package:** add chat/completions.ts back in 
([#1333](https://github.com/openai/openai-node/issues/1333)) ([e4b5546](https://github.com/openai/openai-node/commit/e4b554632ab1646da831f29413fefb3378c49cc1)) + + +### Chores + +* **internal:** add missing return type annotation ([#1334](https://github.com/openai/openai-node/issues/1334)) ([53e0856](https://github.com/openai/openai-node/commit/53e0856ec4d36deee4d71b5aaf436df0a59b9402)) + ## 4.85.0 (2025-02-13) Full Changelog: [v4.84.1...v4.85.0](https://github.com/openai/openai-node/compare/v4.84.1...v4.85.0) diff --git a/jsr.json b/jsr.json index 368f86c0b..0e1eea3b3 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.0", + "version": "4.85.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 46f58814d..45337f85d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.0", + "version": "4.85.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", @@ -112,17 +112,38 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "default": ["./dist/*.mjs", "./dist/*/index.mjs"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "default": [ + "./dist/*.mjs", + "./dist/*/index.mjs" + ] }, "./*.js": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "default": ["./dist/*.js", "./dist/*/index.js"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "default": [ + "./dist/*.js", + "./dist/*/index.js" + ] }, "./*": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "require": ["./dist/*.js", "./dist/*/index.js"], - "default": ["./dist/*.mjs", "./dist/*/index.mjs"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "require": [ + "./dist/*.js", + "./dist/*/index.js" + ], + "default": [ + "./dist/*.mjs", + "./dist/*/index.mjs" + ] } }, "bin": "./bin/cli", diff --git 
a/src/version.ts b/src/version.ts index 6483fa72b..52fb45056 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.0'; // x-release-please-version +export const VERSION = '4.85.1'; // x-release-please-version From b0b4189420e1c5bb5fc4bbb8925f88fe65f9b217 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 18:03:31 +0000 Subject: [PATCH 135/246] fix: optimize sse chunk reading off-by-one error (#1339) --- src/internal/decoders/line.ts | 31 +++++++ src/streaming.ts | 48 +--------- tests/internal/decoders/line.test.ts | 128 +++++++++++++++++++++++++++ tests/streaming.test.ts | 81 +---------------- 4 files changed, 161 insertions(+), 127 deletions(-) create mode 100644 tests/internal/decoders/line.test.ts diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 66f62c057..947f240b3 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -143,3 +143,34 @@ function findNewlineIndex( return null; } + +export function findDoubleNewlineIndex(buffer: Uint8Array): number { + // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) + // and returns the index right after the first occurrence of any pattern, + // or -1 if none of the patterns are found. 
+ const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = 0; i < buffer.length - 1; i++) { + if (buffer[i] === newline && buffer[i + 1] === newline) { + // \n\n + return i + 2; + } + if (buffer[i] === carriage && buffer[i + 1] === carriage) { + // \r\r + return i + 2; + } + if ( + buffer[i] === carriage && + buffer[i + 1] === newline && + i + 3 < buffer.length && + buffer[i + 2] === carriage && + buffer[i + 3] === newline + ) { + // \r\n\r\n + return i + 4; + } + } + + return -1; +} diff --git a/src/streaming.ts b/src/streaming.ts index 1d1ae344b..52266154c 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,6 +1,6 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; -import { LineDecoder } from './internal/decoders/line'; +import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; import { APIError } from './error'; @@ -259,37 +259,6 @@ async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGene } } -function findDoubleNewlineIndex(buffer: Uint8Array): number { - // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) - // and returns the index right after the first occurrence of any pattern, - // or -1 if none of the patterns are found. 
- const newline = 0x0a; // \n - const carriage = 0x0d; // \r - - for (let i = 0; i < buffer.length - 2; i++) { - if (buffer[i] === newline && buffer[i + 1] === newline) { - // \n\n - return i + 2; - } - if (buffer[i] === carriage && buffer[i + 1] === carriage) { - // \r\r - return i + 2; - } - if ( - buffer[i] === carriage && - buffer[i + 1] === newline && - i + 3 < buffer.length && - buffer[i + 2] === carriage && - buffer[i + 3] === newline - ) { - // \r\n\r\n - return i + 4; - } - } - - return -1; -} - class SSEDecoder { private data: string[]; private event: string | null; @@ -345,21 +314,6 @@ class SSEDecoder { } } -/** This is an internal helper function that's just used for testing */ -export function _decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { - const decoder = new LineDecoder(); - const lines: string[] = []; - for (const chunk of chunks) { - lines.push(...decoder.decode(chunk)); - } - - if (flush) { - lines.push(...decoder.flush()); - } - - return lines; -} - function partition(str: string, delimiter: string): [string, string, string] { const index = str.indexOf(delimiter); if (index !== -1) { diff --git a/tests/internal/decoders/line.test.ts b/tests/internal/decoders/line.test.ts new file mode 100644 index 000000000..e76858e55 --- /dev/null +++ b/tests/internal/decoders/line.test.ts @@ -0,0 +1,128 @@ +import { findDoubleNewlineIndex, LineDecoder } from 'openai/internal/decoders/line'; + +function decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { + const decoder = new LineDecoder(); + const lines: string[] = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + if (flush) { + lines.push(...decoder.flush()); + } + + return lines; +} + +describe('line decoder', () => { + test('basic', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); + }); + + test('basic 
with \\r', () => { + expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); + }); + + test('trailing new lines', () => { + expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('trailing new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('escaped new lines', () => { + expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); + }); + + test('escaped new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); + }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + }); + + test('double newline', () => { + expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\n', 
'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('multi-byte characters across chunks', () => { + const decoder = new LineDecoder(); + + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); + expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); + expect( + decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), + ).toHaveLength(0); + + const decoded = decoder.decode(new Uint8Array([0xa])); + expect(decoded).toEqual(['известни']); + }); + + test('flushing trailing newlines', () => { + expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('flushing empty buffer', () => { + expect(decodeChunks([], { flush: true })).toEqual([]); + }); +}); + +describe('findDoubleNewlineIndex', () => { + test('finds \\n\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\nbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\nbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\n'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\n'))).toBe(2); + }); + + test('finds \\r\\r', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\rbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\rbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\r'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\r'))).toBe(2); + }); + + test('finds \\r\\n\\r\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\nbar'))).toBe(7); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\nbar'))).toBe(4); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\n'))).toBe(7); + 
expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\n'))).toBe(4); + }); + + test('returns -1 when no double newline found', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\rbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode(''))).toBe(-1); + }); + + test('handles incomplete patterns', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n'))).toBe(-1); + }); +}); diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 8e5d0ca31..b9a38f208 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -1,86 +1,7 @@ import { Response } from 'node-fetch'; import { PassThrough } from 'stream'; import assert from 'assert'; -import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming'; -import { LineDecoder } from 'openai/internal/decoders/line'; - -describe('line decoder', () => { - test('basic', () => { - // baz is not included because the line hasn't ended yet - expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); - }); - - test('basic with \\r', () => { - expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); - expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); - }); - - test('trailing new lines', () => { - expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); - }); - - test('trailing new lines with \\r', () => { - expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); - }); - - test('escaped new lines', () => { - expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); - }); - - test('escaped new lines with \\r', () => { - expect(decodeChunks(['foo', 
' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); - }); - - test('\\r & \\n split across multiple chunks', () => { - expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); - }); - - test('single \\r', () => { - expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); - }); - - test('double \\r', () => { - expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); - expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - // implementation detail that we don't yield the single \r line until a new \r or \n is encountered - expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); - }); - - test('double \\r then \\r\\n', () => { - expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); - expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); - }); - - test('double newline', () => { - expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - }); - - test('multi-byte characters across chunks', () => { - const decoder = new LineDecoder(); - - // bytes taken from the string 'известни' and arbitrarily split - // so that some multi-byte characters span multiple chunks - expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); - expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); - expect( - decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), - ).toHaveLength(0); - - const decoded = decoder.decode(new Uint8Array([0xa])); - expect(decoded).toEqual(['известни']); - }); - - 
test('flushing trailing newlines', () => { - expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - }); - - test('flushing empty buffer', () => { - expect(decodeChunks([], { flush: true })).toEqual([]); - }); -}); +import { _iterSSEMessages } from 'openai/streaming'; describe('streaming decoding', () => { test('basic', async () => { From 2bce86509a45d96d17cfc837ddfd8ddc5995df8e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 18:04:10 +0000 Subject: [PATCH 136/246] release: 4.85.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 89f1ce153..541794534 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.1" + ".": "4.85.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9850ac460..70a447b0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.2 (2025-02-18) + +Full Changelog: [v4.85.1...v4.85.2](https://github.com/openai/openai-node/compare/v4.85.1...v4.85.2) + +### Bug Fixes + +* optimize sse chunk reading off-by-one error ([#1339](https://github.com/openai/openai-node/issues/1339)) ([c82795b](https://github.com/openai/openai-node/commit/c82795b189c73d1c0e3bc3a40d0d4a2558b0483a)) + ## 4.85.1 (2025-02-14) Full Changelog: [v4.85.0...v4.85.1](https://github.com/openai/openai-node/compare/v4.85.0...v4.85.1) diff --git a/jsr.json b/jsr.json index 0e1eea3b3..8f83c0ff2 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.1", + "version": "4.85.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 45337f85d..661bc2938 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.1", + "version": "4.85.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 52fb45056..4fdc11dc7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.1'; // x-release-please-version +export const VERSION = '4.85.2'; // x-release-please-version From 6d056bf95c9be4046decf20ec4c98dfa2bea2723 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 20 Feb 2025 11:09:29 +0000 Subject: [PATCH 137/246] fix(parsing): remove tool_calls default empty array (#1341) --- src/lib/parser.ts | 17 +++++++++++++++-- src/resources/beta/chat/completions.ts | 2 +- tests/lib/ChatCompletionRunFunctions.test.ts | 20 ++++++++++---------- tests/lib/ChatCompletionStream.test.ts | 3 --- tests/lib/parser.test.ts | 6 ------ 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/src/lib/parser.ts b/src/lib/parser.ts index f2678e312..a750375dc 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -119,7 +119,15 @@ export function maybeParseChatCompletion< ...completion, choices: completion.choices.map((choice) => ({ ...choice, - message: { ...choice.message, parsed: null, tool_calls: choice.message.tool_calls ?? [] }, + message: { + ...choice.message, + parsed: null, + ...(choice.message.tool_calls ? + { + tool_calls: choice.message.tool_calls, + } + : undefined), + }, })), }; } @@ -144,7 +152,12 @@ export function parseChatCompletion< ...choice, message: { ...choice.message, - tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? [], + ...(choice.message.tool_calls ? + { + tool_calls: + choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? undefined, + } + : undefined), parsed: choice.message.content && !choice.message.refusal ? 
parseResponseFormat(params, choice.message.content) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index c9360a95c..083b9914e 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -50,7 +50,7 @@ export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall { export interface ParsedChatCompletionMessage extends ChatCompletionMessage { parsed: ParsedT | null; - tool_calls: Array; + tool_calls?: Array; } export interface ParsedChoice extends ChatCompletion.Choice { diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts index b684f204d..496501a86 100644 --- a/tests/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -628,7 +628,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([`it's raining`]); @@ -876,7 +876,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual(['3']); @@ -1125,7 +1125,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([`must be an object`, '3']); @@ -1443,7 +1443,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([ @@ -1572,7 +1572,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); 
expect(listener.eventFunctionCallResults).toEqual([`it's raining`]); @@ -1795,7 +1795,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual(['3']); @@ -1997,7 +1997,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']); @@ -2301,7 +2301,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual([ @@ -2347,7 +2347,7 @@ describe('resource completions', () => { content: 'The weather is great today!', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }); await listener.sanityCheck(); }); @@ -2386,7 +2386,7 @@ describe('resource completions', () => { content: 'The weather is great today!', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }); await listener.sanityCheck(); }); diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts index e5ef20c9e..34c5fd204 100644 --- a/tests/lib/ChatCompletionStream.test.ts +++ b/tests/lib/ChatCompletionStream.test.ts @@ -39,7 +39,6 @@ describe('.stream()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -198,7 +197,6 @@ describe('.stream()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -386,7 +384,6 @@ describe('.stream()', () => { "parsed": null, "refusal": "I'm very sorry, but I can't assist with that request.", "role": "assistant", - "tool_calls": [], }, } `); diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index b220e92d3..fa8123f5c 100644 
--- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -39,7 +39,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -154,7 +153,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); @@ -488,7 +486,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -787,7 +784,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -947,7 +943,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -1061,7 +1056,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); From d92fd953309951f4d6dcc9858d8782ea1bff4c79 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 11:10:00 +0000 Subject: [PATCH 138/246] release: 4.85.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 541794534..712720117 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.2" + ".": "4.85.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 70a447b0a..36debfad1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.3 (2025-02-20) + +Full Changelog: [v4.85.2...v4.85.3](https://github.com/openai/openai-node/compare/v4.85.2...v4.85.3) + +### Bug Fixes + +* **parsing:** remove tool_calls default empty array ([#1341](https://github.com/openai/openai-node/issues/1341)) ([2672160](https://github.com/openai/openai-node/commit/26721608e61949daa9592483e89b79230bb9198a)) + ## 4.85.2 (2025-02-18) Full Changelog: 
[v4.85.1...v4.85.2](https://github.com/openai/openai-node/compare/v4.85.1...v4.85.2) diff --git a/jsr.json b/jsr.json index 8f83c0ff2..3c480dc70 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.2", + "version": "4.85.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 661bc2938..5fdd39fdc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.2", + "version": "4.85.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4fdc11dc7..679cac2c7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.2'; // x-release-please-version +export const VERSION = '4.85.3'; // x-release-please-version From 9485f5d4d6718bff7f579223c9aa528898451533 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 15:08:12 +0000 Subject: [PATCH 139/246] chore(internal): fix devcontainers setup (#1343) --- .devcontainer/Dockerfile | 23 ----------------------- .devcontainer/devcontainer.json | 27 ++++++++++++--------------- 2 files changed, 12 insertions(+), 38 deletions(-) delete mode 100644 .devcontainer/Dockerfile diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 8ea34be96..000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM debian:bookworm-slim AS stainless - -RUN apt-get update && apt-get install -y \ - nodejs \ - npm \ - yarnpkg \ - && apt-get clean autoclean - -# Ensure UTF-8 encoding -ENV LANG=C.UTF-8 -ENV LC_ALL=C.UTF-8 - -# Yarn -RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn - -WORKDIR /workspace - -COPY package.json yarn.lock /workspace/ - -RUN yarn install - -COPY . 
/workspace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d55fc4d67..763462fad 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,20 +1,17 @@ // For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/debian { - "name": "Debian", - "build": { - "dockerfile": "Dockerfile" + "name": "Development", + "image": "mcr.microsoft.com/devcontainers/typescript-node:latest", + "features": { + "ghcr.io/devcontainers/features/node:1": {} + }, + "postCreateCommand": "yarn install", + "customizations": { + "vscode": { + "extensions": [ + "esbenp.prettier-vscode" + ] + } } - - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - - // Configure tool-specific properties. - // "customizations": {}, - - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
- // "remoteUser": "root" } From a1a125349ba9c9c2bb602c8c8f368e086c41ac1e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 22 Feb 2025 05:06:15 +0000 Subject: [PATCH 140/246] release: 4.85.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 712720117..6fc92ed1e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.3" + ".": "4.85.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 36debfad1..e2f920af7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.4 (2025-02-22) + +Full Changelog: [v4.85.3...v4.85.4](https://github.com/openai/openai-node/compare/v4.85.3...v4.85.4) + +### Chores + +* **internal:** fix devcontainers setup ([#1343](https://github.com/openai/openai-node/issues/1343)) ([cb1ec90](https://github.com/openai/openai-node/commit/cb1ec907832e325bc29abe94ae325e0477cb87d1)) + ## 4.85.3 (2025-02-20) Full Changelog: [v4.85.2...v4.85.3](https://github.com/openai/openai-node/compare/v4.85.2...v4.85.3) diff --git a/jsr.json b/jsr.json index 3c480dc70..7ced58a9c 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.3", + "version": "4.85.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 5fdd39fdc..38572079f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.3", + "version": "4.85.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 679cac2c7..ebfb680f1 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const 
VERSION = '4.85.3'; // x-release-please-version +export const VERSION = '4.85.4'; // x-release-please-version From bb269a1a6fda11c533fb88fa1250a342a5a11ed0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 20:05:21 +0000 Subject: [PATCH 141/246] feat(api): add gpt-4.5-preview (#1349) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 2 ++ src/resources/beta/realtime/realtime.ts | 24 +++++++++++++++++------- src/resources/beta/realtime/sessions.ts | 24 ++++++++++++++++++++++-- src/resources/chat/chat.ts | 2 ++ src/resources/files.ts | 5 +++++ src/resources/uploads/uploads.ts | 2 +- 7 files changed, 50 insertions(+), 11 deletions(-) diff --git a/.stats.yml b/.stats.yml index 658877d3b..163146e38 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0cc63d691..919bf53b3 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1310,6 +1310,8 @@ export interface AssistantUpdateParams { | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index e46dcdaaf..5e2b1c833 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1796,11 +1796,14 @@ export interface SessionCreatedEvent { /** * Send this event to update the session’s default configuration. 
The client may - * send this event at any time to update the session configuration, and any field - * may be updated at any time, except for "voice". The server will respond with a - * `session.updated` event that shows the full effective configuration. Only fields - * that are present are updated, thus the correct way to clear a field like - * "instructions" is to pass an empty string. + * send this event at any time to update any field, except for `voice`. However, + * note that once a session has been initialized with a particular `model`, it + * can’t be changed to another model using `session.update`. + * + * When the server receives a `session.update`, it will respond with a + * `session.updated` event showing the full, effective configuration. Only the + * fields that are present are updated. To clear a field like `instructions`, pass + * an empty string. */ export interface SessionUpdateEvent { /** @@ -1982,11 +1985,18 @@ export namespace SessionUpdateEvent { */ export interface TurnDetection { /** - * Whether or not to automatically generate a response when VAD is enabled. `true` - * by default. + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. */ create_response?: boolean; + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index d2afa25b1..a99c9e045 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -168,6 +168,19 @@ export namespace Session { * volume and respond at the end of user speech. 
*/ export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. + */ + create_response?: boolean; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. @@ -532,11 +545,18 @@ export namespace SessionCreateParams { */ export interface TurnDetection { /** - * Whether or not to automatically generate a response when VAD is enabled. `true` - * by default. + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. */ create_response?: boolean; + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 5bceec45a..627b4fc23 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -61,6 +61,8 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' diff --git a/src/resources/files.ts b/src/resources/files.ts index 67bc95469..f5f23dcad 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -173,6 +173,11 @@ export interface FileObject { */ status: 'uploaded' | 'processed' | 'error'; + /** + * The Unix timestamp (in seconds) for when the file will expire. + */ + expires_at?: number; + /** * @deprecated Deprecated. 
For details on why a fine-tuning training file failed * validation, see the `error` field on `fine_tuning.job`. diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index bfe752cd7..f977e18f6 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -86,7 +86,7 @@ export interface Upload { created_at: number; /** - * The Unix timestamp (in seconds) for when the Upload was created. + * The Unix timestamp (in seconds) for when the Upload will expire. */ expires_at: number; From f93c5bc81d2b58dd821b9c11a789d2750951837d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 20:05:52 +0000 Subject: [PATCH 142/246] release: 4.86.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fc92ed1e..28ebbc3ab 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.4" + ".": "4.86.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2f920af7..48445f98a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.0 (2025-02-27) + +Full Changelog: [v4.85.4...v4.86.0](https://github.com/openai/openai-node/compare/v4.85.4...v4.86.0) + +### Features + +* **api:** add gpt-4.5-preview ([#1349](https://github.com/openai/openai-node/issues/1349)) ([2a1d36b](https://github.com/openai/openai-node/commit/2a1d36b560323fca058f98607775642370e90a47)) + ## 4.85.4 (2025-02-22) Full Changelog: [v4.85.3...v4.85.4](https://github.com/openai/openai-node/compare/v4.85.3...v4.85.4) diff --git a/jsr.json b/jsr.json index 7ced58a9c..28a13dd6b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.4", + "version": "4.86.0", "exports": { ".": "./index.ts", 
"./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 38572079f..be7052b15 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.4", + "version": "4.86.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index ebfb680f1..d342ca5d3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.4'; // x-release-please-version +export const VERSION = '4.86.0'; // x-release-please-version From 634a209a6025640e2849133f6997af8faa28d4d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:00:53 +0000 Subject: [PATCH 143/246] docs: update URLs from stainlessapi.com to stainless.com (#1352) More details at https://www.stainless.com/changelog/stainless-com --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index c54acaf33..3b3bd8a66 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Reporting Security Issues -This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. -To report a security issue, please contact the Stainless team at security@stainlessapi.com. +To report a security issue, please contact the Stainless team at security@stainless.com. 
## Responsible Disclosure From 0d3045ea19e34712fb395000545fcce3f9201149 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:01:25 +0000 Subject: [PATCH 144/246] release: 4.86.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 28ebbc3ab..92b3782ff 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.0" + ".": "4.86.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 48445f98a..9dd57c5ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.1 (2025-02-27) + +Full Changelog: [v4.86.0...v4.86.1](https://github.com/openai/openai-node/compare/v4.86.0...v4.86.1) + +### Documentation + +* update URLs from stainlessapi.com to stainless.com ([#1352](https://github.com/openai/openai-node/issues/1352)) ([8294e9e](https://github.com/openai/openai-node/commit/8294e9ef57ed98722105b56d205ebea9d028f671)) + ## 4.86.0 (2025-02-27) Full Changelog: [v4.85.4...v4.86.0](https://github.com/openai/openai-node/compare/v4.85.4...v4.86.0) diff --git a/jsr.json b/jsr.json index 28a13dd6b..c3addf639 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.0", + "version": "4.86.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index be7052b15..236815732 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.0", + "version": "4.86.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index d342ca5d3..759b28a99 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 
@@ -export const VERSION = '4.86.0'; // x-release-please-version +export const VERSION = '4.86.1'; // x-release-please-version From 1044c487566569e773d5f6c1a94ce6b614e62b80 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 21:17:33 +0000 Subject: [PATCH 145/246] chore(internal): run example files in CI (#1357) --- .github/workflows/ci.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 85d792c44..fe24c0dcb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,6 +65,26 @@ jobs: - name: Run tests run: ./scripts/test + examples: + name: examples + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '18' + - name: Install dependencies + run: | + yarn install + + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + yarn tsn examples/demo.ts + ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) runs-on: ubuntu-latest From 6e00ac242554d5f2b86852a082cab2538c605bc9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 05:07:14 +0000 Subject: [PATCH 146/246] release: 4.86.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 92b3782ff..a889d24b4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.1" + ".": "4.86.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9dd57c5ae..38d54fdc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.2 (2025-03-05) + +Full Changelog: 
[v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2) + +### Chores + +* **internal:** run example files in CI ([#1357](https://github.com/openai/openai-node/issues/1357)) ([88d0050](https://github.com/openai/openai-node/commit/88d0050336749deb3810b4cb43473de1f84e42bd)) + ## 4.86.1 (2025-02-27) Full Changelog: [v4.86.0...v4.86.1](https://github.com/openai/openai-node/compare/v4.86.0...v4.86.1) diff --git a/jsr.json b/jsr.json index c3addf639..1c0948aaa 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.1", + "version": "4.86.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 236815732..78afb8946 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.1", + "version": "4.86.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 759b28a99..c43a3c320 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.86.1'; // x-release-please-version +export const VERSION = '4.86.2'; // x-release-please-version From 06122424a4d783aff07b7089b64986fb35bc24e4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 11:29:02 -0400 Subject: [PATCH 147/246] feat(api): add /v1/responses and built-in tools [platform.openai.com/docs/changelog](http://platform.openai.com/docs/changelog) --- .stats.yml | 4 +- README.md | 151 +- api.md | 224 +- examples/responses/stream.ts | 24 + examples/responses/streaming-tools.ts | 52 + .../responses/structured-outputs-tools.ts | 60 + examples/responses/structured-outputs.ts | 32 + examples/yarn.lock | 0 scripts/bootstrap | 2 +- src/core.ts | 4 +- src/helpers/zod.ts | 46 + src/index.ts | 58 +- src/lib/ResponsesParser.ts | 262 ++ src/lib/parser.ts | 28 + src/lib/responses/EventTypes.ts | 76 + 
src/lib/responses/ResponseStream.ts | 298 ++ src/resources/beta/assistants.ts | 55 +- src/resources/beta/beta.ts | 37 - src/resources/beta/index.ts | 16 - src/resources/beta/threads/runs/runs.ts | 7 +- src/resources/beta/threads/threads.ts | 90 +- src/resources/chat/chat.ts | 46 +- src/resources/chat/completions/completions.ts | 290 +- src/resources/chat/completions/index.ts | 1 - src/resources/chat/completions/messages.ts | 2 +- src/resources/chat/index.ts | 3 +- src/resources/files.ts | 26 +- src/resources/index.ts | 20 + src/resources/responses/index.ts | 9 + src/resources/responses/input-items.ts | 276 ++ src/resources/responses/responses.ts | 2761 +++++++++++++++++ src/resources/shared.ts | 156 +- src/resources/uploads/uploads.ts | 7 +- .../{beta => }/vector-stores/file-batches.ts | 23 +- .../{beta => }/vector-stores/files.ts | 89 +- .../{beta => }/vector-stores/index.ts | 6 + .../{beta => }/vector-stores/vector-stores.ts | 130 +- src/streaming.ts | 2 +- .../chat/completions/completions.test.ts | 11 +- .../responses/input-items.test.ts | 40 + .../responses.test.ts} | 68 +- .../vector-stores/file-batches.test.ts | 21 +- .../api-resources/vector-stores/files.test.ts | 132 + .../vector-stores/vector-stores.test.ts | 39 +- 44 files changed, 5226 insertions(+), 458 deletions(-) create mode 100755 examples/responses/stream.ts create mode 100755 examples/responses/streaming-tools.ts create mode 100755 examples/responses/structured-outputs-tools.ts create mode 100755 examples/responses/structured-outputs.ts create mode 100644 examples/yarn.lock create mode 100644 src/lib/ResponsesParser.ts create mode 100644 src/lib/responses/EventTypes.ts create mode 100644 src/lib/responses/ResponseStream.ts create mode 100644 src/resources/responses/index.ts create mode 100644 src/resources/responses/input-items.ts create mode 100644 src/resources/responses/responses.ts rename src/resources/{beta => }/vector-stores/file-batches.ts (92%) rename src/resources/{beta => 
}/vector-stores/files.ts (74%) rename src/resources/{beta => }/vector-stores/index.ts (82%) rename src/resources/{beta => }/vector-stores/vector-stores.ts (77%) create mode 100644 tests/api-resources/responses/input-items.test.ts rename tests/api-resources/{beta/vector-stores/files.test.ts => responses/responses.test.ts} (58%) rename tests/api-resources/{beta => }/vector-stores/file-batches.test.ts (81%) create mode 100644 tests/api-resources/vector-stores/files.test.ts rename tests/api-resources/{beta => }/vector-stores/vector-stores.test.ts (71%) diff --git a/.stats.yml b/.stats.yml index 163146e38..455874212 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml +configured_endpoints: 81 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml diff --git a/README.md b/README.md index 166e35e22..8515c81ed 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,3 @@ -> [!IMPORTANT] -> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. -> -> Please try it out and let us know if you run into any issues! 
-> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 - # OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) @@ -27,9 +21,7 @@ deno add jsr:@openai/openai npx jsr add @openai/openai ``` -These commands will make the module importable from the `@openai/openai` scope: - -You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: +These commands will make the module importable from the `@openai/openai` scope. You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; @@ -37,9 +29,10 @@ import OpenAI from 'jsr:@openai/openai'; ## Usage -The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. +The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). + +The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below. 
- ```ts import OpenAI from 'openai'; @@ -47,100 +40,55 @@ const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const chatCompletion = await client.chat.completions.create({ - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }); -} +const response = await client.responses.create({ + model: 'gpt-4o', + instructions: 'You are a coding assistant that talks like a pirate', + input: 'Are semicolons optional in JavaScript?', +}); -main(); +console.log(response.output_text); ``` -## Streaming responses - -We provide support for streaming responses using Server Sent Events (SSE). +The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below. ```ts import OpenAI from 'openai'; -const client = new OpenAI(); +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); -async function main() { - const stream = await client.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } -} +const completion = await client.chat.completions.create({ + model: 'gpt-4o', + messages: [ + { role: 'developer', content: 'Talk like a pirate.' }, + { role: 'user', content: 'Are semicolons optional in JavaScript?' }, + ], +}); -main(); +console.log(completion.choices[0].message.content); ``` -If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. 
- -### Chat Completion streaming helpers +## Streaming responses -This library also provides several conveniences for streaming chat completions, for example: +We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; -const openai = new OpenAI(); - -async function main() { - const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - - stream.on('content', (delta, snapshot) => { - process.stdout.write(delta); - }); - - // or, equivalently: - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } - - const chatCompletion = await stream.finalChatCompletion(); - console.log(chatCompletion); // {id: "…", choices: […], …} -} - -main(); -``` - -See [helpers.md](helpers.md#chat-events) for more details. - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: - - -```ts -import OpenAI from 'openai'; +const client = new OpenAI(); -const client = new OpenAI({ - apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +const stream = await client.responses.create({ + model: 'gpt-4o', + input: 'Say "Sheep sleep deep" ten times fast!', + stream: true, }); -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); +for await (const event of stream) { + console.log(event); } - -main(); ``` -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. 
- ## File uploads Request parameters that correspond to file uploads can be passed in many different forms: @@ -265,17 +213,17 @@ Note that requests which time out will be [retried twice by default](#retries). All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. ```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 +const response = await client.responses.create({ model: 'gpt-4o', input: 'testing 123' }); +console.log(response._request_id) // req_123 ``` You can also access the Request ID using the `.withResponse()` method: ```ts -const { data: stream, request_id } = await openai.chat.completions +const { data: stream, request_id } = await openai.responses .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + input: 'Say this is a test', stream: true, }) .withResponse(); @@ -355,12 +303,6 @@ console.log(result.choices[0]!.message?.content); For more information on support for the Azure API, see [azure.md](azure.md). -## Automated function calls - -We provide the `openai.beta.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, looping as long as the model requests tool calls. - -For more information see [helpers.md](helpers.md#automated-function-calls). 
- ## Advanced Usage ### Accessing raw Response data (e.g., headers) @@ -373,17 +315,19 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new OpenAI(); -const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +const httpResponse = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .asResponse(); -console.log(response.headers.get('X-My-Header')); -console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +// access the underlying web standard Response object +console.log(httpResponse.headers.get('X-My-Header')); +console.log(httpResponse.statusText); + +const { data: modelResponse, response: raw } = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletion); +console.log(modelResponse); ``` ### Making custom/undocumented requests @@ -432,6 +376,11 @@ validate or strip extra properties from the response from the API. ### Customizing the fetch client +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. +> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. 
If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment, diff --git a/api.md b/api.md index 63f239628..b21ac2d5f 100644 --- a/api.md +++ b/api.md @@ -2,10 +2,15 @@ Types: +- ChatModel +- ComparisonFilter +- CompoundFilter - ErrorObject - FunctionDefinition - FunctionParameters - Metadata +- Reasoning +- ReasoningEffort - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText @@ -52,7 +57,6 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent -- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStoreMessage - ChatCompletionStreamOptions @@ -63,6 +67,7 @@ Types: - ChatCompletionToolMessageParam - ChatCompletionUserMessageParam - CreateChatCompletionRequestMessage +- ChatCompletionReasoningEffort Methods: @@ -224,6 +229,67 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage +# VectorStores + +Types: + +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyObjectParam +- VectorStore +- VectorStoreDeleted +- VectorStoreSearchResponse + +Methods: + +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresPage +- client.vectorStores.del(vectorStoreId) -> VectorStoreDeleted +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponsesPage + +## Files + +Types: + +- VectorStoreFile +- VectorStoreFileDeleted +- FileContentResponse + +Methods: + +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> 
VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage +- client.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponsesPage +- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> + +## FileBatches + +Types: + +- VectorStoreFileBatch + +Methods: + +- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) 
-> Promise<VectorStoreFileBatch> + # Beta ## Realtime @@ -287,72 +353,6 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse -## VectorStores - -Types: - -- AutoFileChunkingStrategyParam -- FileChunkingStrategy -- FileChunkingStrategyParam -- OtherFileChunkingStrategyObject -- StaticFileChunkingStrategy -- StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyObjectParam -- VectorStore -- VectorStoreDeleted - -Methods: - -- client.beta.vectorStores.create({ ...params }) -> VectorStore -- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore -- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore -- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage -- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted - -### Files - -Types: - -- VectorStoreFile -- VectorStoreFileDeleted - -Methods: - -- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile -- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted -- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) 
-> Promise<VectorStoreFile> - -### FileBatches - -Types: - -- VectorStoreFileBatch - -Methods: - -- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> - -## Chat - -### Completions - -Methods: - -- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream - ## Assistants Types: @@ -526,3 +526,93 @@ Types: Methods: - client.uploads.parts.create(uploadId, { ...params }) -> UploadPart + +# Responses + +Types: + +- ComputerTool +- EasyInputMessage +- FileSearchTool +- FunctionTool +- Response +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCodeInterpreterCallCodeDeltaEvent +- ResponseCodeInterpreterCallCodeDoneEvent +- ResponseCodeInterpreterCallCompletedEvent +- ResponseCodeInterpreterCallInProgressEvent +- ResponseCodeInterpreterCallInterpretingEvent +- ResponseCodeInterpreterToolCall +- ResponseCompletedEvent +- ResponseComputerToolCall +- ResponseContent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreatedEvent +- ResponseError +- ResponseErrorEvent +- ResponseFailedEvent +- ResponseFileSearchCallCompletedEvent +- ResponseFileSearchCallInProgressEvent +- ResponseFileSearchCallSearchingEvent +- ResponseFileSearchToolCall +- ResponseFormatTextConfig +- ResponseFormatTextJSONSchemaConfig +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseFunctionToolCall +- ResponseFunctionWebSearch +- ResponseInProgressEvent +- ResponseIncludable +- ResponseIncompleteEvent +- ResponseInput +- ResponseInputAudio +- ResponseInputContent +- ResponseInputFile +- ResponseInputImage +- ResponseInputItem +- ResponseInputMessageContentList +- ResponseInputText +- ResponseOutputAudio +- ResponseOutputItem +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseOutputMessage +- ResponseOutputRefusal +- ResponseOutputText +- ResponseRefusalDeltaEvent +- ResponseRefusalDoneEvent +- ResponseStatus +- ResponseStreamEvent +- ResponseTextAnnotationDeltaEvent +- ResponseTextConfig +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- ResponseUsage +- ResponseWebSearchCallCompletedEvent +- ResponseWebSearchCallInProgressEvent +- 
ResponseWebSearchCallSearchingEvent +- Tool +- ToolChoiceFunction +- ToolChoiceOptions +- ToolChoiceTypes +- WebSearchTool + +Methods: + +- client.responses.create({ ...params }) -> Response +- client.responses.retrieve(responseId, { ...params }) -> Response +- client.responses.del(responseId) -> void + +## InputItems + +Types: + +- ResponseItemList + +Methods: + +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage diff --git a/examples/responses/stream.ts b/examples/responses/stream.ts new file mode 100755 index 000000000..ea3d0849e --- /dev/null +++ b/examples/responses/stream.ts @@ -0,0 +1,24 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; + +const openai = new OpenAI(); + +async function main() { + const runner = openai.responses + .stream({ + model: 'gpt-4o-2024-08-06', + input: 'solve 8x + 31 = 2', + }) + .on('event', (event) => console.log(event)) + .on('response.output_text.delta', (diff) => process.stdout.write(diff.delta)); + + for await (const event of runner) { + console.log('event', event); + } + + const result = await runner.finalResponse(); + console.log(result); +} + +main(); diff --git a/examples/responses/streaming-tools.ts b/examples/responses/streaming-tools.ts new file mode 100755 index 000000000..87a48d0c3 --- /dev/null +++ b/examples/responses/streaming-tools.ts @@ -0,0 +1,52 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: 
z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const stream = client.responses.stream({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + }); + + for await (const event of stream) { + console.dir(event, { depth: 10 }); + } +} + +main(); diff --git a/examples/responses/structured-outputs-tools.ts b/examples/responses/structured-outputs-tools.ts new file mode 100755 index 000000000..29eaabf93 --- /dev/null +++ b/examples/responses/structured-outputs-tools.ts @@ -0,0 +1,60 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const rsp = await client.responses.parse({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + 
}); + + console.log(rsp); + + const functionCall = rsp.output[0]!; + + if (functionCall.type !== 'function_call') { + throw new Error('Expected function call'); + } + + const query = functionCall.parsed_arguments; + + console.log(query); +} + +main(); diff --git a/examples/responses/structured-outputs.ts b/examples/responses/structured-outputs.ts new file mode 100755 index 000000000..07ff93a60 --- /dev/null +++ b/examples/responses/structured-outputs.ts @@ -0,0 +1,32 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodTextFormat } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +const client = new OpenAI(); + +async function main() { + const rsp = await client.responses.parse({ + input: 'solve 8x + 31 = 2', + model: 'gpt-4o-2024-08-06', + text: { + format: zodTextFormat(MathResponse, 'math_response'), + }, + }); + + console.log(rsp.output_parsed); + console.log('answer: ', rsp.output_parsed?.final_answer); +} + +main().catch(console.error); diff --git a/examples/yarn.lock b/examples/yarn.lock new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/bootstrap b/scripts/bootstrap index 033156d3a..f107c3a24 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." 
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/src/core.ts b/src/core.ts index 6578c0781..a41eaa3fa 100644 --- a/src/core.ts +++ b/src/core.ts @@ -62,8 +62,8 @@ async function defaultParseResponse(props: APIResponseProps): Promise { return _zodToJsonSchema(schema, { @@ -74,6 +78,23 @@ export function zodResponseFormat( ); } +export function zodTextFormat( + zodObject: ZodInput, + name: string, + props?: Omit, +): AutoParseableTextFormat> { + return makeParseableTextFormat( + { + type: 'json_schema', + ...props, + name, + strict: true, + schema: zodToJsonSchema(zodObject, { name }), + }, + (content) => zodObject.parse(JSON.parse(content)), + ); +} + /** * Creates a chat completion `function` tool that can be invoked * automatically by the chat completion `.runTools()` method or automatically @@ -106,3 +127,28 @@ export function zodFunction(options: { }, ); } + +export function zodResponsesFunction(options: { + name: string; + parameters: Parameters; + function?: ((args: zodInfer) => unknown | Promise) | undefined; + description?: string | undefined; +}): AutoParseableResponseTool<{ + arguments: Parameters; + name: string; + function: (args: zodInfer) => unknown; +}> { + return makeParseableResponseTool( + { + type: 'function', + name: options.name, + parameters: zodToJsonSchema(options.parameters, { name: options.name }), + strict: true, + ...(options.description ? 
{ description: options.description } : undefined), + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/index.ts b/src/index.ts index debefce8c..c3abed2db 100644 --- a/src/index.ts +++ b/src/index.ts @@ -65,14 +65,34 @@ import { } from './resources/moderations'; import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; -import { Chat, ChatModel } from './resources/chat/chat'; +import { Chat } from './resources/chat/chat'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Responses } from './resources/responses/responses'; import { Upload, UploadCompleteParams, UploadCreateParams, Uploads as UploadsAPIUploads, } from './resources/uploads/uploads'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreSearchResponsesPage, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './resources/vector-stores/vector-stores'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -98,7 +118,6 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, - ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -267,9 +286,11 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new 
API.Batches(this); uploads: API.Uploads = new API.Uploads(this); + responses: API.Responses = new API.Responses(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -325,10 +346,14 @@ OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.ModelsPage = ModelsPage; OpenAI.FineTuning = FineTuning; +OpenAI.VectorStores = VectorStores; +OpenAI.VectorStoresPage = VectorStoresPage; +OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; +OpenAI.Responses = Responses; export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -350,7 +375,6 @@ export declare namespace OpenAI { export { Chat as Chat, - type ChatModel as ChatModel, type ChatCompletion as ChatCompletion, type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, type ChatCompletionAudio as ChatCompletionAudio, @@ -371,7 +395,6 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -440,6 +463,26 @@ export declare namespace OpenAI { export { FineTuning as FineTuning }; + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type 
StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + export { Beta as Beta }; export { @@ -459,10 +502,17 @@ export declare namespace OpenAI { type UploadCompleteParams as UploadCompleteParams, }; + export { Responses as Responses }; + + export type ChatModel = API.ChatModel; + export type ComparisonFilter = API.ComparisonFilter; + export type CompoundFilter = API.CompoundFilter; export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; + export type Reasoning = API.Reasoning; + export type ReasoningEffort = API.ReasoningEffort; export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts new file mode 100644 index 000000000..780b779ff --- /dev/null +++ b/src/lib/ResponsesParser.ts @@ -0,0 +1,262 @@ +import { OpenAIError } from '../error'; +import { type ChatCompletionTool } from '../resources'; +import { + type FunctionTool, + type ParsedContent, + type ParsedResponse, + type ParsedResponseFunctionToolCall, + type ParsedResponseOutputItem, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsNonStreaming, + type 
ResponseFunctionToolCall, + type Tool, +} from '../resources/responses/responses'; +import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; + +type ParseableToolsParams = Array | ChatCompletionTool | null; + +export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { + tools?: ParseableToolsParams; +}; + +export type ExtractParsedContentFromParams = + NonNullable['format'] extends AutoParseableTextFormat ? P : null; + +export function maybeParseResponse< + Params extends ResponseCreateParamsBase | null, + ParsedT = Params extends null ? null : ExtractParsedContentFromParams>, +>(response: Response, params: Params): ParsedResponse { + if (!params || !hasAutoParseableInput(params)) { + return { + ...response, + output_parsed: null, + output: response.output.map((item) => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: null, + }; + } + + if (item.type === 'message') { + return { + ...item, + content: item.content.map((content) => ({ + ...content, + parsed: null, + })), + }; + } else { + return item; + } + }), + }; + } + + return parseResponse(response, params); +} + +export function parseResponse< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(response: Response, params: Params): ParsedResponse { + const output: Array> = response.output.map( + (item): ParsedResponseOutputItem => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: parseToolCall(params, item), + }; + } + if (item.type === 'message') { + const content: Array> = item.content.map((content) => { + if (content.type === 'output_text') { + return { + ...content, + parsed: parseTextFormat(params, content.text), + }; + } + + return content; + }); + + return { + ...item, + content, + }; + } + + return item; + }, + ); + + const parsed: Omit, 'output_parsed'> = Object.assign({}, response, { output }); + if (!Object.getOwnPropertyDescriptor(response, 
'output_text')) { + addOutputText(parsed); + } + + Object.defineProperty(parsed, 'output_parsed', { + enumerable: true, + get() { + for (const output of parsed.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text' && content.parsed !== null) { + return content.parsed; + } + } + } + + return null; + }, + }); + + return parsed as ParsedResponse; +} + +function parseTextFormat< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(params: Params, content: string): ParsedT | null { + if (params.text?.format?.type !== 'json_schema') { + return null; + } + + if ('$parseRaw' in params.text?.format) { + const text_format = params.text?.format as unknown as AutoParseableTextFormat; + return text_format.$parseRaw(content); + } + + return JSON.parse(content); +} + +export function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean { + if (isAutoParsableResponseFormat(params.text?.format)) { + return true; + } + + return false; +} + +type ToolOptions = { + name: string; + arguments: any; + function?: ((args: any) => any) | undefined; +}; + +export type AutoParseableResponseTool< + OptionsT extends ToolOptions, + HasFunction = OptionsT['function'] extends Function ? 
true : false, +> = FunctionTool & { + __arguments: OptionsT['arguments']; // type-level only + __name: OptionsT['name']; // type-level only + + $brand: 'auto-parseable-tool'; + $callback: ((args: OptionsT['arguments']) => any) | undefined; + $parseRaw(args: string): OptionsT['arguments']; +}; + +export function makeParseableResponseTool( + tool: FunctionTool, + { + parser, + callback, + }: { + parser: (content: string) => OptionsT['arguments']; + callback: ((args: any) => any) | undefined; + }, +): AutoParseableResponseTool { + const obj = { ...tool }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-tool', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + $callback: { + value: callback, + enumerable: false, + }, + }); + + return obj as AutoParseableResponseTool; +} + +export function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool { + return tool?.['$brand'] === 'auto-parseable-tool'; +} + +function getInputToolByName(input_tools: Array, name: string): FunctionTool | undefined { + return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as + | FunctionTool + | undefined; +} + +function parseToolCall( + params: Params, + toolCall: ResponseFunctionToolCall, +): ParsedResponseFunctionToolCall { + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + + return { + ...toolCall, + ...toolCall, + parsed_arguments: + isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments) + : inputTool?.strict ? JSON.parse(toolCall.arguments) + : null, + }; +} + +export function shouldParseToolCall( + params: ResponseCreateParamsNonStreaming | null | undefined, + toolCall: ResponseFunctionToolCall, +): boolean { + if (!params) { + return false; + } + + const inputTool = getInputToolByName(params.tools ?? 
[], toolCall.name); + return isAutoParsableTool(inputTool) || inputTool?.strict || false; +} + +export function validateInputTools(tools: ChatCompletionTool[] | undefined) { + for (const tool of tools ?? []) { + if (tool.type !== 'function') { + throw new OpenAIError( + `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``, + ); + } + + if (tool.function.strict !== true) { + throw new OpenAIError( + `The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`, + ); + } + } +} + +export function addOutputText(rsp: Response): void { + const texts: string[] = []; + for (const output of rsp.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text') { + texts.push(content.text); + } + } + } + + rsp.output_text = texts.join(''); +} diff --git a/src/lib/parser.ts b/src/lib/parser.ts index a750375dc..d75d32a40 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -14,6 +14,7 @@ import { } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; +import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams @@ -51,6 +52,33 @@ export function makeParseableResponseFormat( return obj as AutoParseableResponseFormat; } +export type AutoParseableTextFormat = ResponseFormatTextJSONSchemaConfig & { + __output: ParsedT; // type-level only + + $brand: 'auto-parseable-response-format'; + $parseRaw(content: string): ParsedT; +}; + +export function makeParseableTextFormat( + response_format: ResponseFormatTextJSONSchemaConfig, + parser: (content: string) => ParsedT, +): AutoParseableTextFormat { + const obj = { ...response_format }; + + Object.defineProperties(obj, { + 
$brand: { + value: 'auto-parseable-response-format', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + }); + + return obj as AutoParseableTextFormat; +} + export function isAutoParsableResponseFormat( response_format: any, ): response_format is AutoParseableResponseFormat { diff --git a/src/lib/responses/EventTypes.ts b/src/lib/responses/EventTypes.ts new file mode 100644 index 000000000..fc1620988 --- /dev/null +++ b/src/lib/responses/EventTypes.ts @@ -0,0 +1,76 @@ +import { + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCompletedEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseErrorEvent, + ResponseFailedEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseInProgressEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextDeltaEvent as RawResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseIncompleteEvent, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, +} from '../../resources/responses/responses'; + +export type ResponseFunctionCallArgumentsDeltaEvent = RawResponseFunctionCallArgumentsDeltaEvent & { + snapshot: string; +}; + +export type ResponseTextDeltaEvent = RawResponseTextDeltaEvent & { + snapshot: string; +}; + +export type 
ParsedResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts new file mode 100644 index 000000000..0d6cd47dd --- /dev/null +++ b/src/lib/responses/ResponseStream.ts @@ -0,0 +1,298 @@ +import { + type ParsedResponse, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsStreaming, + type ResponseStreamEvent, +} from 'openai/resources/responses/responses'; +import * as Core from '../../core'; +import { APIUserAbortError, OpenAIError } from '../../error'; +import OpenAI from '../../index'; +import { type BaseEvents, EventStream } from '../EventStream'; +import { type ResponseFunctionCallArgumentsDeltaEvent, type ResponseTextDeltaEvent } from './EventTypes'; +import { maybeParseResponse } from '../ResponsesParser'; + +export type ResponseStreamParams = Omit & { + stream?: true; +}; + 
+type ResponseEvents = BaseEvents & + Omit< + { + [K in ResponseStreamEvent['type']]: (event: Extract) => void; + }, + 'response.output_text.delta' | 'response.function_call_arguments.delta' + > & { + event: (event: ResponseStreamEvent) => void; + 'response.output_text.delta': (event: ResponseTextDeltaEvent) => void; + 'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void; + }; + +export type ResponseStreamingParams = Omit & { + stream?: true; +}; + +export class ResponseStream + extends EventStream + implements AsyncIterable +{ + #params: ResponseStreamingParams | null; + #currentResponseSnapshot: Response | undefined; + #finalResponse: ParsedResponse | undefined; + + constructor(params: ResponseStreamingParams | null) { + super(); + this.#params = params; + } + + static createResponse( + client: OpenAI, + params: ResponseStreamParams, + options?: Core.RequestOptions, + ): ResponseStream { + const runner = new ResponseStream(params as ResponseCreateParamsStreaming); + runner._run(() => + runner._createResponse(client, params, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + #beginRequest() { + if (this.ended) return; + this.#currentResponseSnapshot = undefined; + } + + #addEvent(this: ResponseStream, event: ResponseStreamEvent) { + if (this.ended) return; + + const response = this.#accumulateResponse(event); + this._emit('event', event); + + switch (event.type) { + case 'response.output_text.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got 
${content.type}`); + } + + this._emit('response.output_text.delta', { + ...event, + snapshot: content.text, + }); + } + break; + } + case 'response.function_call_arguments.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + this._emit('response.function_call_arguments.delta', { + ...event, + snapshot: output.arguments, + }); + } + break; + } + default: + // @ts-ignore + this._emit(event.type, event); + break; + } + } + + #endRequest(): ParsedResponse { + if (this.ended) { + throw new OpenAIError(`stream has ended, this shouldn't happen`); + } + const snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + throw new OpenAIError(`request ended without sending any events`); + } + this.#currentResponseSnapshot = undefined; + const parsedResponse = finalizeResponse(snapshot, this.#params); + this.#finalResponse = parsedResponse; + + return parsedResponse; + } + + protected async _createResponse( + client: OpenAI, + params: ResponseStreamingParams, + options?: Core.RequestOptions, + ): Promise> { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + this.#beginRequest(); + + const stream = await client.responses.create( + { ...params, stream: true }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + return this.#endRequest(); + } + + #accumulateResponse(event: ResponseStreamEvent): Response { + let snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + if (event.type !== 'response.created') { + throw new OpenAIError( + `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`, + ); + } + 
snapshot = this.#currentResponseSnapshot = event.response; + return snapshot; + } + + switch (event.type) { + case 'response.output_item.added': { + snapshot.output.push(event.item); + break; + } + case 'response.content_part.added': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + output.content.push(event.part); + } + break; + } + case 'response.output_text.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + content.text += event.delta; + } + break; + } + case 'response.function_call_arguments.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + output.arguments += event.delta; + } + break; + } + case 'response.completed': { + this.#currentResponseSnapshot = event.response; + break; + } + } + + return snapshot; + } + + [Symbol.asyncIterator](this: ResponseStream): AsyncIterator { + const pushQueue: ResponseStreamEvent[] = []; + const readQueue: { + resolve: (event: ResponseStreamEvent | undefined) => void; + reject: (err: unknown) => void; + }[] = []; + let done = false; + + this.on('event', (event) => { + const reader = readQueue.shift(); + if (reader) { + reader.resolve(event); + } else { + pushQueue.push(event); + } + }); + + this.on('end', () => { + done = true; + for (const reader of readQueue) { + reader.resolve(undefined); + } + readQueue.length = 0; + 
}); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + return { + next: async (): Promise> => { + if (!pushQueue.length) { + if (done) { + return { value: undefined, done: true }; + } + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true })); + } + const event = pushQueue.shift()!; + return { value: event, done: false }; + }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, + }; + } + + /** + * @returns a promise that resolves with the final Response, or rejects + * if an error occurred or the stream ended prematurely without producing a Response. + */ + async finalResponse(): Promise> { + await this.done(); + const response = this.#finalResponse; + if (!response) throw new OpenAIError('stream ended without producing a Response'); + return response; + } +} + +function finalizeResponse( + snapshot: Response, + params: ResponseStreamingParams | null, +): ParsedResponse { + return maybeParseResponse(snapshot, params); +} diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 919bf53b3..0668dcf54 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -4,10 +4,8 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; -import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from
'./threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; @@ -1105,7 +1103,7 @@ export interface AssistantCreateParams { * [Model overview](https://platform.openai.com/docs/models) for descriptions of * them. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * The description of the assistant. The maximum length is 512 characters. @@ -1134,14 +1132,14 @@ export interface AssistantCreateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. Compatible with @@ -1244,9 +1242,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1265,6 +1263,45 @@ export namespace AssistantCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. 
+ */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -1337,14 +1374,14 @@ export interface AssistantUpdateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. 
Compatible with diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index df929b2f7..0b909de18 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -40,36 +40,16 @@ import { ThreadUpdateParams, Threads, } from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; -import { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyObjectParam, - VectorStore, - VectorStoreCreateParams, - VectorStoreDeleted, - VectorStoreListParams, - VectorStoreUpdateParams, - VectorStores, - VectorStoresPage, -} from './vector-stores/vector-stores'; import { Chat } from './chat/chat'; export class Beta extends APIResource { realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); - vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } Beta.Realtime = Realtime; -Beta.VectorStores = VectorStores; -Beta.VectorStoresPage = VectorStoresPage; Beta.Assistants = Assistants; Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; @@ -77,23 +57,6 @@ Beta.Threads = Threads; export declare namespace Beta { export { Realtime as Realtime }; - export { - VectorStores as VectorStores, - type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, - type FileChunkingStrategy as FileChunkingStrategy, - type FileChunkingStrategyParam as FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy as StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type 
StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, - type VectorStore as VectorStore, - type VectorStoreDeleted as VectorStoreDeleted, - VectorStoresPage as VectorStoresPage, - type VectorStoreCreateParams as VectorStoreCreateParams, - type VectorStoreUpdateParams as VectorStoreUpdateParams, - type VectorStoreListParams as VectorStoreListParams, - }; - export { Chat }; export { diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index babca0016..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -37,19 +37,3 @@ export { type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { - VectorStoresPage, - VectorStores, - type AutoFileChunkingStrategyParam, - type FileChunkingStrategy, - type FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam, - type VectorStore, - type VectorStoreDeleted, - type VectorStoreCreateParams, - type VectorStoreUpdateParams, - type VectorStoreListParams, -} from './vector-stores/index'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 8ab94cc99..15bfb4204 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -10,7 +10,6 @@ import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStrea import * as RunsAPI from './runs'; import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; -import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; @@ -722,7 +721,7 @@ export interface RunCreateParamsBase { * associated with the assistant. If not, the model associated with the assistant * will be used. 
*/ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Body param: Whether to enable @@ -732,14 +731,14 @@ export interface RunCreateParamsBase { parallel_tool_calls?: boolean; /** - * Body param: **o1 and o3-mini models only** + * Body param: **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Body param: Specifies the format that the model must output. Compatible with diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 3f69c6e60..8075ba0ac 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -8,7 +8,6 @@ import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; import { Annotation, @@ -45,7 +44,6 @@ import { TextDelta, TextDeltaBlock, } from './messages'; -import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; import { RequiredActionFunctionToolCall, @@ -441,9 +439,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. 
*/ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -462,6 +460,45 @@ export namespace ThreadCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -573,7 +610,7 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Whether to enable @@ -800,9 +837,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. 
*/ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -821,6 +858,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 627b4fc23..9dbc636d8 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as Shared from '../shared'; import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, @@ -52,48 +53,7 @@ export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } -export type ChatModel = - | 'o3-mini' - | 'o3-mini-2025-01-31' - | 'o1' - | 'o1-2024-12-17' - | 'o1-preview' - | 'o1-preview-2024-09-12' - | 'o1-mini' - | 'o1-mini-2024-09-12' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' - | 'gpt-4o' - | 'gpt-4o-2024-11-20' - | 'gpt-4o-2024-08-06' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-audio-preview' - | 'gpt-4o-audio-preview-2024-10-01' - | 'gpt-4o-audio-preview-2024-12-17' - | 'gpt-4o-mini-audio-preview' - | 'gpt-4o-mini-audio-preview-2024-12-17' - | 'chatgpt-4o-latest' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; +export type ChatModel = Shared.ChatModel; Chat.Completions = Completions; Chat.ChatCompletionsPage = ChatCompletionsPage; @@ -123,7 +83,6 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -134,6 +93,7 @@ export declare namespace Chat { type 
ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 3af4a3a1d..7b1c353e2 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -7,7 +7,6 @@ import * as Core from '../../../core'; import * as CompletionsCompletionsAPI from './completions'; import * as CompletionsAPI from '../../completions'; import * as Shared from '../../shared'; -import * as ChatAPI from '../chat'; import * as MessagesAPI from './messages'; import { MessageListParams, Messages } from './messages'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -17,6 +16,13 @@ export class Completions extends APIResource { messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); /** + * **Starting a new project?** We recommend trying + * [Responses](https://platform.openai.com/docs/api-reference/responses) to take + * advantage of the latest OpenAI platform features. Compare + * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + * + * --- + * * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and @@ -50,7 +56,7 @@ export class Completions extends APIResource { } /** - * Get a stored chat completion. 
Only chat completions that have been created with + * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -58,7 +64,7 @@ export class Completions extends APIResource { } /** - * Modify a stored chat completion. Only chat completions that have been created + * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. */ @@ -71,7 +77,7 @@ export class Completions extends APIResource { } /** - * List stored chat completions. Only chat completions that have been stored with + * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. */ list( @@ -90,7 +96,7 @@ export class Completions extends APIResource { } /** - * Delete a stored chat completion. Only chat completions that have been created + * Delete a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. */ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -316,16 +322,16 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `ash`, `ballad`, - * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, - * `echo`, and `shimmer`; these voices are less expressive). + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. 
*/ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** - * Represents a streamed chunk of a chat completion response returned by model, + * Represents a streamed chunk of a chat completion response returned by the model, * based on the provided input. + * [Learn more](https://platform.openai.com/docs/guides/streaming-responses). */ export interface ChatCompletionChunk { /** @@ -512,7 +518,43 @@ export namespace ChatCompletionChunk { export type ChatCompletionContentPart = | ChatCompletionContentPartText | ChatCompletionContentPartImage - | ChatCompletionContentPartInputAudio; + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPart.File; + +export namespace ChatCompletionContentPart { + /** + * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + * generation. + */ + export interface File { + file: File.File; + + /** + * The type of the content part. Always `file`. + */ + type: 'file'; + } + + export namespace File { + export interface File { + /** + * The base64 encoded file data, used when passing the file to the model as a + * string. + */ + file_data?: string; + + /** + * The ID of an uploaded file to use as input. + */ + file_id?: string; + + /** + * The name of the file, used when passing the file to the model as a string. + */ + file_name?: string; + } + } +} /** * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). @@ -685,6 +727,12 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * Annotations for the message, when applicable, as when using the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + annotations?: Array; + /** * If the audio output modality is requested, this object contains data about the * audio response from the model. 
@@ -705,6 +753,48 @@ export interface ChatCompletionMessage { } export namespace ChatCompletionMessage { + /** + * A URL citation when using web search. + */ + export interface Annotation { + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * A URL citation when using web search. + */ + url_citation: Annotation.URLCitation; + } + + export namespace Annotation { + /** + * A URL citation when using web search. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The URL of the web resource. + */ + url: string; + } + } + /** * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a * function that should be called, as generated by the model. @@ -818,16 +908,6 @@ export interface ChatCompletionPredictionContent { type: 'content'; } -/** - * **o1 and o3-mini models only** - * - * Constrains effort on reasoning for - * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - * result in faster responses and fewer tokens used on reasoning in a response. 
- */ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; - /** * The role of the author of a message */ @@ -998,6 +1078,8 @@ export interface ChatCompletionUserMessageParam { */ export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam; +export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null; + export type ChatCompletionCreateParams = | ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; @@ -1014,11 +1096,13 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * Parameters for audio output. Required when audio output is requested with @@ -1107,8 +1191,8 @@ export interface ChatCompletionCreateParamsBase { metadata?: Shared.Metadata | null; /** - * Output types that you would like the model to generate for this request. Most - * models are capable of generating text, which is the default: + * Output types that you would like the model to generate. Most models are capable + * of generating text, which is the default: * * `["text"]` * @@ -1118,7 +1202,7 @@ export interface ChatCompletionCreateParamsBase { * * `["text", "audio"]` */ - modalities?: Array | null; + modalities?: Array<'text' | 'audio'> | null; /** * How many chat completion choices to generate for each input message. 
Note that @@ -1148,14 +1232,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * An object specifying the format that the model must output. @@ -1165,21 +1249,14 @@ export interface ChatCompletionCreateParamsBase { * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. */ response_format?: | Shared.ResponseFormatText - | Shared.ResponseFormatJSONObject - | Shared.ResponseFormatJSONSchema; + | Shared.ResponseFormatJSONSchema + | Shared.ResponseFormatJSONObject; /** * This feature is in Beta. 
If specified, our system will make a best effort to @@ -1198,15 +1275,19 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarantee. + * latency guarantee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarantee. + * tier with a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ service_tier?: 'auto' | 'default' | null; /** - * Up to 4 sequences where the API will stop generating further tokens. + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. */ stop?: string | null | Array; /** * Whether or not to store the output of this chat completion request for use in * our [model distillation](https://platform.openai.com/docs/guides/distillation) * or [evals](https://platform.openai.com/docs/guides/evals) products. */ store?: boolean | null; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: boolean | null; @@ -1282,6 +1365,13 @@ export interface ChatCompletionCreateParamsBase { * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + web_search_options?: ChatCompletionCreateParams.WebSearchOptions; } export namespace ChatCompletionCreateParams { @@ -1313,6 +1403,70 @@ export namespace ChatCompletionCreateParams { parameters?: Shared.FunctionParameters; } + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + export interface WebSearchOptions { + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + /** + * Approximate location parameters for the search. + */ + user_location?: WebSearchOptions.UserLocation | null; + } + + export namespace WebSearchOptions { + /** + * Approximate location parameters for the search. + */ + export interface UserLocation { + /** + * Approximate location parameters for the search. + */ + approximate: UserLocation.Approximate; + + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + } + + export namespace UserLocation { + /** + * Approximate location parameters for the search. 
+ */ + export interface Approximate { + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } + } + } + export type ChatCompletionCreateParamsNonStreaming = CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; export type ChatCompletionCreateParamsStreaming = @@ -1326,12 +1480,14 @@ export type CompletionCreateParams = ChatCompletionCreateParams; export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. 
*/ stream?: false | null; } @@ -1343,12 +1499,14 @@ export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonSt export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream: true; } @@ -1377,19 +1535,19 @@ export type CompletionUpdateParams = ChatCompletionUpdateParams; export interface ChatCompletionListParams extends CursorPageParams { /** - * A list of metadata keys to filter the chat completions by. Example: + * A list of metadata keys to filter the Chat Completions by. Example: * * `metadata[key1]=value1&metadata[key2]=value2` */ metadata?: Shared.Metadata | null; /** - * The model used to generate the chat completions. + * The model used to generate the Chat Completions. */ model?: string; /** - * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or * `desc` for descending order. Defaults to `asc`. 
*/ order?: 'asc' | 'desc'; @@ -1425,7 +1583,6 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -1436,6 +1593,7 @@ export declare namespace Completions { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 3691f41d8..994d6f880 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -24,7 +24,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index fc1cc5d94..519a33aff 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -9,7 +9,7 @@ import { type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** - * Get the messages in a stored chat completion. 
Only chat completions that have + * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. */ list( diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index a9b5b46fb..62ca758e0 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Chat, type ChatModel } from './chat'; +export { Chat } from './chat'; export { ChatCompletionStoreMessagesPage, ChatCompletionsPage, @@ -25,7 +25,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/files.ts b/src/resources/files.ts index f5f23dcad..723ac4cde 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -186,16 +186,12 @@ export interface FileObject { } /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ -export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals'; export interface FileCreateParams { /** @@ -204,14 +200,10 @@ export interface FileCreateParams { file: Core.Uploadable; /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ purpose: FilePurpose; } diff --git a/src/resources/index.ts b/src/resources/index.ts index ad0302357..04c2c887b 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -60,4 +60,24 @@ export { type ModerationCreateResponse, type ModerationCreateParams, } from './moderations'; +export { Responses } from './responses/responses'; export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; +export { + VectorStoresPage, + VectorStoreSearchResponsesPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores/vector-stores'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 000000000..84f761a93 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + ResponseItemListDataPage, + InputItems, + type ResponseItemList, + type InputItemListParams, +} from './input-items'; +export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 000000000..9704be89a --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,276 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | 
ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + > { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, { + query, + ...options, + }); + } +} + +export class ResponseItemListDataPage extends CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +/** + * A list of Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export namespace ResponseItemList { + export interface Message { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. 
One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + export interface ComputerCallOutput { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. 
+ */ + message: string; + } + } + + export interface FunctionCallOutput { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +InputItems.ResponseItemListDataPage = ResponseItemListDataPage; + +export declare namespace InputItems { + export { + type ResponseItemList as ResponseItemList, + ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts new file mode 100644 index 000000000..2ad146873 --- /dev/null +++ b/src/resources/responses/responses.ts @@ -0,0 +1,2761 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { + type ExtractParsedContentFromParams, + parseResponse, + type ResponseCreateParamsWithTools, + addOutputText, +} from '../../lib/ResponsesParser'; +import * as Core from '../../core'; +import { APIPromise, isRequestOptions } from '../../core'; +import { APIResource } from '../../resource'; +import { Stream } from '../../streaming'; +import * as Shared from '../shared'; +import * as InputItemsAPI from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import * as ResponsesAPI from './responses'; +import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; + +export interface ParsedResponseOutputText extends ResponseOutputText { + parsed: ParsedT | null; +} + +export type ParsedContent = ParsedResponseOutputText | ResponseOutputRefusal; + +export interface ParsedResponseOutputMessage extends ResponseOutputMessage { + content: ParsedContent[]; +} + +export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall { + parsed_arguments: any; +} + +export type ParsedResponseOutputItem = + | ParsedResponseOutputMessage + | ParsedResponseFunctionToolCall + | ResponseFileSearchToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseOutputItem.Reasoning; + +export interface ParsedResponse extends Response { + output: Array>; + + output_parsed: ParsedT | null; +} + +export type ResponseParseParams = ResponseCreateParamsNonStreaming; +export class Responses extends APIResource { + inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); + + /** + * Creates a model response. Provide + * [text](https://platform.openai.com/docs/guides/text) or + * [image](https://platform.openai.com/docs/guides/images) inputs to generate + * [text](https://platform.openai.com/docs/guides/text) or + * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. 
Have + * the model call your own + * [custom code](https://platform.openai.com/docs/guides/function-calling) or use + * built-in [tools](https://platform.openai.com/docs/guides/tools) like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + * your own data as input for the model's response. + */ + create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; + create( + body: ResponseCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + body: ResponseCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Response>; + create( + body: ResponseCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return ( + this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise> + )._thenUnwrap((rsp) => { + if ('type' in rsp && rsp.type === 'response') { + addOutputText(rsp as Response); + } + + return rsp; + }) as APIPromise | APIPromise>; + } + + /** + * Retrieves a model response with the given ID. + */ + retrieve( + responseId: string, + query?: ResponseRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise; + retrieve( + responseId: string, + query: ResponseRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(responseId, {}, query); + } + return this._client.get(`/responses/${responseId}`, { query, ...options }); + } + + /** + * Deletes a model response with the given ID. 
+ */ + del(responseId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/responses/${responseId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + parse>( + body: Params, + options?: Core.RequestOptions, + ): Core.APIPromise> { + return this._client.responses + .create(body, options) + ._thenUnwrap((response) => parseResponse(response as Response, body)); + } + + /** + * Creates a chat completion stream + */ + stream>( + body: Params, + options?: Core.RequestOptions, + ): ResponseStream { + return ResponseStream.createResponse(this._client, body, options); + } +} + +/** + * A tool that controls a virtual computer. Learn more about the + * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + */ +export interface ComputerTool { + /** + * The height of the computer display. + */ + display_height: number; + + /** + * The width of the computer display. + */ + display_width: number; + + /** + * The type of computer environment to control. + */ + environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + + /** + * The type of the computer use tool. Always `computer_use_preview`. + */ + type: 'computer-preview'; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export interface EasyInputMessage { + /** + * Text, image, or audio input to the model, used to generate a response. Can also + * contain previous assistant responses. + */ + content: string | ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. 
Always `message`. + */ + type?: 'message'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export interface FileSearchTool { + /** + * The type of the file search tool. Always `file_search`. + */ + type: 'file_search'; + + /** + * The IDs of the vector stores to search. + */ + vector_store_ids: Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: FileSearchTool.RankingOptions; +} + +export namespace FileSearchTool { + /** + * Ranking options for search. + */ + export interface RankingOptions { + /** + * The ranker to use for the file search. + */ + ranker?: 'auto' | 'default-2024-11-15'; + + /** + * The score threshold for the file search, a number between 0 and 1. Numbers + * closer to 1 will attempt to return only the most relevant results, but may + * return fewer results. + */ + score_threshold?: number; + } +} + +/** + * Defines a function in your own code the model can choose to call. Learn more + * about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ +export interface FunctionTool { + /** + * The name of the function to call. + */ + name: string; + + /** + * A JSON schema object describing the parameters of the function. + */ + parameters: Record; + + /** + * Whether to enforce strict parameter validation. Default `true`. + */ + strict: boolean; + + /** + * The type of the function tool. Always `function`. + */ + type: 'function'; + + /** + * A description of the function. Used by the model to determine whether or not to + * call the function. 
+ */ + description?: string | null; +} + +export interface Response { + /** + * Unique identifier for this Response. + */ + id: string; + + /** + * Unix timestamp (in seconds) of when this Response was created. + */ + created_at: number; + + output_text: string; + + /** + * An error object returned when the model fails to generate a Response. + */ + error: ResponseError | null; + + /** + * Details about why the response is incomplete. + */ + incomplete_details: Response.IncompleteDetails | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will be not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * The object type of this resource - always set to `response`. + */ + object: 'response'; + + /** + * An array of content items generated by the model. + * + * - The length and order of items in the `output` array is dependent on the + * model's response. 
+ * - Rather than accessing the first item in the `output` array and assuming it's + * an `assistant` message with the content generated by the model, you might + * consider using the `output_text` property where supported in SDKs. + */ + output: Array; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls: boolean; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature: number | null; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. 
So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p: number | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ + status?: ResponseStatus; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. 
+ */ + truncation?: 'auto' | 'disabled' | null; + + /** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ + usage?: ResponseUsage; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace Response { + /** + * Details about why the response is incomplete. + */ + export interface IncompleteDetails { + /** + * The reason why the response is incomplete. + */ + reason?: 'max_output_tokens' | 'content_filter'; + } +} + +/** + * Emitted when there is a partial audio response. + */ +export interface ResponseAudioDeltaEvent { + /** + * A chunk of Base64 encoded response audio bytes. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Emitted when the audio response is complete. + */ +export interface ResponseAudioDoneEvent { + /** + * The type of the event. Always `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Emitted when there is a partial transcript of audio. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The partial transcript of the audio response. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.transcript.delta`. + */ + type: 'response.audio.transcript.delta'; +} + +/** + * Emitted when the full audio transcript is completed. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The type of the event. Always `response.audio.transcript.done`. + */ + type: 'response.audio.transcript.done'; +} + +/** + * Emitted when a partial code snippet is added by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDeltaEvent { + /** + * The partial code snippet added by the code interpreter. 
+ */ + delta: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.delta`. + */ + type: 'response.code_interpreter_call.code.delta'; +} + +/** + * Emitted when code snippet output is finalized by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDoneEvent { + /** + * The final code snippet output by the code interpreter. + */ + code: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.done`. + */ + type: 'response.code_interpreter_call.code.done'; +} + +/** + * Emitted when the code interpreter call is completed. + */ +export interface ResponseCodeInterpreterCallCompletedEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.completed`. + */ + type: 'response.code_interpreter_call.completed'; +} + +/** + * Emitted when a code interpreter call is in progress. + */ +export interface ResponseCodeInterpreterCallInProgressEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.in_progress`. + */ + type: 'response.code_interpreter_call.in_progress'; +} + +/** + * Emitted when the code interpreter is actively interpreting the code snippet. + */ +export interface ResponseCodeInterpreterCallInterpretingEvent { + /** + * A tool call to run code. 
+   */
+  code_interpreter_call: ResponseCodeInterpreterToolCall;
+
+  /**
+   * The index of the output item that the code interpreter call is in progress.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.code_interpreter_call.interpreting`.
+   */
+  type: 'response.code_interpreter_call.interpreting';
+}
+
+/**
+ * A tool call to run code.
+ */
+export interface ResponseCodeInterpreterToolCall {
+  /**
+   * The unique ID of the code interpreter tool call.
+   */
+  id: string;
+
+  /**
+   * The code to run.
+   */
+  code: string;
+
+  /**
+   * The results of the code interpreter tool call.
+   */
+  results: Array<ResponseCodeInterpreterToolCall.Logs | ResponseCodeInterpreterToolCall.Files>;
+
+  /**
+   * The status of the code interpreter tool call.
+   */
+  status: 'in_progress' | 'interpreting' | 'completed';
+
+  /**
+   * The type of the code interpreter tool call. Always `code_interpreter_call`.
+   */
+  type: 'code_interpreter_call';
+}
+
+export namespace ResponseCodeInterpreterToolCall {
+  /**
+   * The output of a code interpreter tool call that is text.
+   */
+  export interface Logs {
+    /**
+     * The logs of the code interpreter tool call.
+     */
+    logs: string;
+
+    /**
+     * The type of the code interpreter text output. Always `logs`.
+     */
+    type: 'logs';
+  }
+
+  /**
+   * The output of a code interpreter tool call that is a file.
+   */
+  export interface Files {
+    files: Array<Files.File>;
+
+    /**
+     * The type of the code interpreter file output. Always `files`.
+     */
+    type: 'files';
+  }
+
+  export namespace Files {
+    export interface File {
+      /**
+       * The ID of the file.
+       */
+      file_id: string;
+
+      /**
+       * The MIME type of the file.
+       */
+      mime_type: string;
+    }
+  }
+}
+
+/**
+ * Emitted when the model response is complete.
+ */
+export interface ResponseCompletedEvent {
+  /**
+   * Properties of the completed response.
+   */
+  response: Response;
+
+  /**
+   * The type of the event. Always `response.completed`.
+   */
+  type: 'response.completed';
+}
+
+/**
+ * A tool call to a computer use tool. See the
+ * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
+ * for more information.
+ */
+export interface ResponseComputerToolCall {
+  /**
+   * The unique ID of the computer call.
+   */
+  id: string;
+
+  /**
+   * A click action.
+   */
+  action:
+    | ResponseComputerToolCall.Click
+    | ResponseComputerToolCall.DoubleClick
+    | ResponseComputerToolCall.Drag
+    | ResponseComputerToolCall.Keypress
+    | ResponseComputerToolCall.Move
+    | ResponseComputerToolCall.Screenshot
+    | ResponseComputerToolCall.Scroll
+    | ResponseComputerToolCall.Type
+    | ResponseComputerToolCall.Wait;
+
+  /**
+   * An identifier used when responding to the tool call with output.
+   */
+  call_id: string;
+
+  /**
+   * The pending safety checks for the computer call.
+   */
+  pending_safety_checks: Array<ResponseComputerToolCall.PendingSafetyCheck>;
+
+  /**
+   * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+   * Populated when items are returned via API.
+   */
+  status: 'in_progress' | 'completed' | 'incomplete';
+
+  /**
+   * The type of the computer call. Always `computer_call`.
+   */
+  type: 'computer_call';
+}
+
+export namespace ResponseComputerToolCall {
+  /**
+   * A click action.
+   */
+  export interface Click {
+    /**
+     * Indicates which mouse button was pressed during the click. One of `left`,
+     * `right`, `wheel`, `back`, or `forward`.
+     */
+    button: 'left' | 'right' | 'wheel' | 'back' | 'forward';
+
+    /**
+     * Specifies the event type. For a click action, this property is always set to
+     * `click`.
+     */
+    type: 'click';
+
+    /**
+     * The x-coordinate where the click occurred.
+     */
+    x: number;
+
+    /**
+     * The y-coordinate where the click occurred.
+     */
+    y: number;
+  }
+
+  /**
+   * A double click action.
+   */
+  export interface DoubleClick {
+    /**
+     * Specifies the event type. For a double click action, this property is always set
+     * to `double_click`.
+     */
+    type: 'double_click';
+
+    /**
+     * The x-coordinate where the double click occurred.
+     */
+    x: number;
+
+    /**
+     * The y-coordinate where the double click occurred.
+     */
+    y: number;
+  }
+
+  /**
+   * A drag action.
+   */
+  export interface Drag {
+    /**
+     * An array of coordinates representing the path of the drag action. Coordinates
+     * will appear as an array of objects, eg
+     *
+     * ```
+     * [
+     *   { x: 100, y: 200 },
+     *   { x: 200, y: 300 }
+     * ]
+     * ```
+     */
+    path: Array<Drag.Path>;
+
+    /**
+     * Specifies the event type. For a drag action, this property is always set to
+     * `drag`.
+     */
+    type: 'drag';
+  }
+
+  export namespace Drag {
+    /**
+     * A series of x/y coordinate pairs in the drag path.
+     */
+    export interface Path {
+      /**
+       * The x-coordinate.
+       */
+      x: number;
+
+      /**
+       * The y-coordinate.
+       */
+      y: number;
+    }
+  }
+
+  /**
+   * A collection of keypresses the model would like to perform.
+   */
+  export interface Keypress {
+    /**
+     * The combination of keys the model is requesting to be pressed. This is an array
+     * of strings, each representing a key.
+     */
+    keys: Array<string>;
+
+    /**
+     * Specifies the event type. For a keypress action, this property is always set to
+     * `keypress`.
+     */
+    type: 'keypress';
+  }
+
+  /**
+   * A mouse move action.
+   */
+  export interface Move {
+    /**
+     * Specifies the event type. For a move action, this property is always set to
+     * `move`.
+     */
+    type: 'move';
+
+    /**
+     * The x-coordinate to move to.
+     */
+    x: number;
+
+    /**
+     * The y-coordinate to move to.
+     */
+    y: number;
+  }
+
+  /**
+   * A screenshot action.
+   */
+  export interface Screenshot {
+    /**
+     * Specifies the event type. For a screenshot action, this property is always set
+     * to `screenshot`.
+     */
+    type: 'screenshot';
+  }
+
+  /**
+   * A scroll action.
+   */
+  export interface Scroll {
+    /**
+     * The horizontal scroll distance.
+     */
+    scroll_x: number;
+
+    /**
+     * The vertical scroll distance.
+     */
+    scroll_y: number;
+
+    /**
+     * Specifies the event type. For a scroll action, this property is always set to
+     * `scroll`.
+ */ + type: 'scroll'; + + /** + * The x-coordinate where the scroll occurred. + */ + x: number; + + /** + * The y-coordinate where the scroll occurred. + */ + y: number; + } + + /** + * An action to type in text. + */ + export interface Type { + /** + * The text to type. + */ + text: string; + + /** + * Specifies the event type. For a type action, this property is always set to + * `type`. + */ + type: 'type'; + } + + /** + * A wait action. + */ + export interface Wait { + /** + * Specifies the event type. For a wait action, this property is always set to + * `wait`. + */ + type: 'wait'; + } + + /** + * A pending safety check for the computer call. + */ + export interface PendingSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * Multi-modal input and output contents. + */ +export type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + | ResponseOutputText + | ResponseOutputRefusal; + +/** + * Emitted when a new content part is added. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part that was added. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +/** + * Emitted when a content part is done. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part that is done. 
+ */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +/** + * An event that is emitted when a response is created. + */ +export interface ResponseCreatedEvent { + /** + * The response that was created. + */ + response: Response; + + /** + * The type of the event. Always `response.created`. + */ + type: 'response.created'; +} + +/** + * An error object returned when the model fails to generate a Response. + */ +export interface ResponseError { + /** + * The error code for the response. + */ + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + + /** + * A human-readable description of the error. + */ + message: string; +} + +/** + * Emitted when an error occurs. + */ +export interface ResponseErrorEvent { + /** + * The error code. + */ + code: string | null; + + /** + * The error message. + */ + message: string; + + /** + * The error parameter. + */ + param: string | null; + + /** + * The type of the event. Always `error`. + */ + type: 'error'; +} + +/** + * An event that is emitted when a response fails. + */ +export interface ResponseFailedEvent { + /** + * The response that failed. + */ + response: Response; + + /** + * The type of the event. 
Always `response.failed`.
+   */
+  type: 'response.failed';
+}
+
+/**
+ * Emitted when a file search call is completed (results found).
+ */
+export interface ResponseFileSearchCallCompletedEvent {
+  /**
+   * The ID of the output item that the file search call is initiated.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the file search call is initiated.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.file_search_call.completed`.
+   */
+  type: 'response.file_search_call.completed';
+}
+
+/**
+ * Emitted when a file search call is initiated.
+ */
+export interface ResponseFileSearchCallInProgressEvent {
+  /**
+   * The ID of the output item that the file search call is initiated.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the file search call is initiated.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.file_search_call.in_progress`.
+   */
+  type: 'response.file_search_call.in_progress';
+}
+
+/**
+ * Emitted when a file search is currently searching.
+ */
+export interface ResponseFileSearchCallSearchingEvent {
+  /**
+   * The ID of the output item that the file search call is initiated.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the file search call is searching.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.file_search_call.searching`.
+   */
+  type: 'response.file_search_call.searching';
+}
+
+/**
+ * The results of a file search tool call. See the
+ * [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
+ * for more information.
+ */
+export interface ResponseFileSearchToolCall {
+  /**
+   * The unique ID of the file search tool call.
+   */
+  id: string;
+
+  /**
+   * The queries used to search for files.
+   */
+  queries: Array<string>;
+
+  /**
+   * The status of the file search tool call.
One of `in_progress`, `searching`,
+   * `incomplete` or `failed`,
+   */
+  status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed';
+
+  /**
+   * The type of the file search tool call. Always `file_search_call`.
+   */
+  type: 'file_search_call';
+
+  /**
+   * The results of the file search tool call.
+   */
+  results?: Array<ResponseFileSearchToolCall.Result> | null;
+}
+
+export namespace ResponseFileSearchToolCall {
+  export interface Result {
+    /**
+     * Set of 16 key-value pairs that can be attached to an object. This can be useful
+     * for storing additional information about the object in a structured format, and
+     * querying for objects via API or the dashboard. Keys are strings with a maximum
+     * length of 64 characters. Values are strings with a maximum length of 512
+     * characters, booleans, or numbers.
+     */
+    attributes?: Record<string, string | number | boolean> | null;
+
+    /**
+     * The unique ID of the file.
+     */
+    file_id?: string;
+
+    /**
+     * The name of the file.
+     */
+    filename?: string;
+
+    /**
+     * The relevance score of the file - a value between 0 and 1.
+     */
+    score?: number;
+
+    /**
+     * The text that was retrieved from the file.
+     */
+    text?: string;
+  }
+}
+
+/**
+ * An object specifying the format that the model must output.
+ *
+ * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ * ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * The default format is `{ "type": "text" }` with no additional options.
+ *
+ * **Not recommended for gpt-4o and newer models:**
+ *
+ * Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ * ensures the message the model generates is valid JSON. Using `json_schema` is
+ * preferred for models that support it.
+ */
+export type ResponseFormatTextConfig =
+  | Shared.ResponseFormatText
+  | ResponseFormatTextJSONSchemaConfig
+  | Shared.ResponseFormatJSONObject;
+
+/**
+ * JSON Schema response format.
Used to generate structured JSON responses. Learn
+ * more about
+ * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+ */
+export interface ResponseFormatTextJSONSchemaConfig {
+  /**
+   * The schema for the response format, described as a JSON Schema object. Learn how
+   * to build JSON schemas [here](https://json-schema.org/).
+   */
+  schema: Record<string, unknown>;
+
+  /**
+   * The type of response format being defined. Always `json_schema`.
+   */
+  type: 'json_schema';
+
+  /**
+   * A description of what the response format is for, used by the model to determine
+   * how to respond in the format.
+   */
+  description?: string;
+
+  /**
+   * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
+   * and dashes, with a maximum length of 64.
+   */
+  name?: string;
+
+  /**
+   * Whether to enable strict schema adherence when generating the output. If set to
+   * true, the model will always follow the exact schema defined in the `schema`
+   * field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+   * learn more, read the
+   * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+   */
+  strict?: boolean | null;
+}
+
+/**
+ * Emitted when there is a partial function-call arguments delta.
+ */
+export interface ResponseFunctionCallArgumentsDeltaEvent {
+  /**
+   * The function-call arguments delta that is added.
+   */
+  delta: string;
+
+  /**
+   * The ID of the output item that the function-call arguments delta is added to.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the function-call arguments delta is added to.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.function_call_arguments.delta`.
+   */
+  type: 'response.function_call_arguments.delta';
+}
+
+/**
+ * Emitted when function-call arguments are finalized.
+ */
+export interface ResponseFunctionCallArgumentsDoneEvent {
+  /**
+   * The function-call arguments.
+ */ + arguments: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item. + */ + output_index: number; + + type: 'response.function_call_arguments.done'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCall { + /** + * The unique ID of the function tool call. + */ + id: string; + + /** + * A JSON string of the arguments to pass to the function. + */ + arguments: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * The name of the function to run. + */ + name: string; + + /** + * The type of the function tool call. Always `function_call`. + */ + type: 'function_call'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * The results of a web search tool call. See the + * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + * more information. + */ +export interface ResponseFunctionWebSearch { + /** + * The unique ID of the web search tool call. + */ + id: string; + + /** + * The status of the web search tool call. + */ + status: 'in_progress' | 'searching' | 'completed' | 'failed'; + + /** + * The type of the web search tool call. Always `web_search_call`. + */ + type: 'web_search_call'; +} + +/** + * Emitted when the response is in progress. + */ +export interface ResponseInProgressEvent { + /** + * The response that is in progress. + */ + response: Response; + + /** + * The type of the event. Always `response.in_progress`. + */ + type: 'response.in_progress'; +} + +/** + * Specify additional output data to include in the model response. 
Currently
+ * supported values are:
+ *
+ * - `file_search_call.results`: Include the search results of the file search tool
+ *   call.
+ * - `message.input_image.image_url`: Include image urls from the input message.
+ * - `computer_call_output.output.image_url`: Include image urls from the computer
+ *   call output.
+ */
+export type ResponseIncludable =
+  | 'file_search_call.results'
+  | 'message.input_image.image_url'
+  | 'computer_call_output.output.image_url';
+
+/**
+ * An event that is emitted when a response finishes as incomplete.
+ */
+export interface ResponseIncompleteEvent {
+  /**
+   * The response that was incomplete.
+   */
+  response: Response;
+
+  /**
+   * The type of the event. Always `response.incomplete`.
+   */
+  type: 'response.incomplete';
+}
+
+/**
+ * A list of one or many input items to the model, containing different content
+ * types.
+ */
+export type ResponseInput = Array<ResponseInputItem>;
+
+/**
+ * An audio input to the model.
+ */
+export interface ResponseInputAudio {
+  /**
+   * Base64-encoded audio data.
+   */
+  data: string;
+
+  /**
+   * The format of the audio data. Currently supported formats are `mp3` and `wav`.
+   */
+  format: 'mp3' | 'wav';
+
+  /**
+   * The type of the input item. Always `input_audio`.
+   */
+  type: 'input_audio';
+}
+
+/**
+ * A text input to the model.
+ */
+export type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile;
+
+/**
+ * A file input to the model.
+ */
+export interface ResponseInputFile {
+  /**
+   * The type of the input item. Always `input_file`.
+   */
+  type: 'input_file';
+
+  /**
+   * The content of the file to be sent to the model.
+   */
+  file_data?: string;
+
+  /**
+   * The ID of the file to be sent to the model.
+   */
+  file_id?: string;
+
+  /**
+   * The name of the file to be sent to the model.
+   */
+  filename?: string;
+}
+
+/**
+ * An image input to the model. Learn about
+ * [image inputs](https://platform.openai.com/docs/guides/vision).
+ */ +export interface ResponseInputImage { + /** + * The detail level of the image to be sent to the model. One of `high`, `low`, or + * `auto`. Defaults to `auto`. + */ + detail: 'high' | 'low' | 'auto'; + + /** + * The type of the input item. Always `input_image`. + */ + type: 'input_image'; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string | null; + + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 + * encoded image in a data URL. + */ + image_url?: string | null; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export type ResponseInputItem = + | EasyInputMessage + | ResponseInputItem.Message + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseInputItem.ComputerCallOutput + | ResponseFunctionWebSearch + | ResponseFunctionToolCall + | ResponseInputItem.FunctionCallOutput + | ResponseInputItem.Reasoning + | ResponseInputItem.ItemReference; + +export namespace ResponseInputItem { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. + */ + export interface Message { + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. 
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+
+    /**
+     * The type of the message input. Always set to `message`.
+     */
+    type?: 'message';
+  }
+
+  /**
+   * The output of a computer tool call.
+   */
+  export interface ComputerCallOutput {
+    /**
+     * The ID of the computer tool call that produced the output.
+     */
+    call_id: string;
+
+    /**
+     * A computer screenshot image used with the computer use tool.
+     */
+    output: ComputerCallOutput.Output;
+
+    /**
+     * The type of the computer tool call output. Always `computer_call_output`.
+     */
+    type: 'computer_call_output';
+
+    /**
+     * The ID of the computer tool call output.
+     */
+    id?: string;
+
+    /**
+     * The safety checks reported by the API that have been acknowledged by the
+     * developer.
+     */
+    acknowledged_safety_checks?: Array<ComputerCallOutput.AcknowledgedSafetyCheck>;
+
+    /**
+     * The status of the message input. One of `in_progress`, `completed`, or
+     * `incomplete`. Populated when input items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace ComputerCallOutput {
+    /**
+     * A computer screenshot image used with the computer use tool.
+     */
+    export interface Output {
+      /**
+       * Specifies the event type. For a computer screenshot, this property is always set
+       * to `computer_screenshot`.
+       */
+      type: 'computer_screenshot';
+
+      /**
+       * The identifier of an uploaded file that contains the screenshot.
+       */
+      file_id?: string;
+
+      /**
+       * The URL of the screenshot image.
+       */
+      image_url?: string;
+    }
+
+    /**
+     * A pending safety check for the computer call.
+     */
+    export interface AcknowledgedSafetyCheck {
+      /**
+       * The ID of the pending safety check.
+       */
+      id: string;
+
+      /**
+       * The type of the pending safety check.
+       */
+      code: string;
+
+      /**
+       * Details about the pending safety check.
+       */
+      message: string;
+    }
+  }
+
+  /**
+   * The output of a function tool call.
+   */
+  export interface FunctionCallOutput {
+    /**
+     * The unique ID of the function tool call generated by the model.
+     */
+    call_id: string;
+
+    /**
+     * A JSON string of the output of the function tool call.
+     */
+    output: string;
+
+    /**
+     * The type of the function tool call output. Always `function_call_output`.
+     */
+    type: 'function_call_output';
+
+    /**
+     * The unique ID of the function tool call output. Populated when this item is
+     * returned via API.
+     */
+    id?: string;
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  /**
+   * A description of the chain of thought used by a reasoning model while generating
+   * a response.
+   */
+  export interface Reasoning {
+    /**
+     * The unique identifier of the reasoning content.
+     */
+    id: string;
+
+    /**
+     * Reasoning text contents.
+     */
+    content: Array<Reasoning.Content>;
+
+    /**
+     * The type of the object. Always `reasoning`.
+     */
+    type: 'reasoning';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace Reasoning {
+    export interface Content {
+      /**
+       * A short summary of the reasoning used by the model when generating the response.
+       */
+      text: string;
+
+      /**
+       * The type of the object. Always `text`.
+       */
+      type: 'reasoning_summary';
+    }
+  }
+
+  /**
+   * An internal identifier for an item to reference.
+   */
+  export interface ItemReference {
+    /**
+     * The ID of the item to reference.
+     */
+    id: string;
+
+    /**
+     * The type of item to reference. Always `item_reference`.
+     */
+    type: 'item_reference';
+  }
+}
+
+/**
+ * A list of one or many input items to the model, containing different content
+ * types.
+ */
+export type ResponseInputMessageContentList = Array<ResponseInputContent>;
+
+/**
+ * A text input to the model.
+ */
+export interface ResponseInputText {
+  /**
+   * The text input to the model.
+   */
+  text: string;
+
+  /**
+   * The type of the input item. Always `input_text`.
+   */
+  type: 'input_text';
+}
+
+/**
+ * An audio output from the model.
+ */
+export interface ResponseOutputAudio {
+  /**
+   * Base64-encoded audio data from the model.
+   */
+  data: string;
+
+  /**
+   * The transcript of the audio data from the model.
+   */
+  transcript: string;
+
+  /**
+   * The type of the output audio. Always `output_audio`.
+   */
+  type: 'output_audio';
+}
+
+/**
+ * An output message from the model.
+ */
+export type ResponseOutputItem =
+  | ResponseOutputMessage
+  | ResponseFileSearchToolCall
+  | ResponseFunctionToolCall
+  | ResponseFunctionWebSearch
+  | ResponseComputerToolCall
+  | ResponseOutputItem.Reasoning;
+
+export namespace ResponseOutputItem {
+  /**
+   * A description of the chain of thought used by a reasoning model while generating
+   * a response.
+   */
+  export interface Reasoning {
+    /**
+     * The unique identifier of the reasoning content.
+     */
+    id: string;
+
+    /**
+     * Reasoning text contents.
+     */
+    content: Array<Reasoning.Content>;
+
+    /**
+     * The type of the object. Always `reasoning`.
+     */
+    type: 'reasoning';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace Reasoning {
+    export interface Content {
+      /**
+       * A short summary of the reasoning used by the model when generating the response.
+       */
+      text: string;
+
+      /**
+       * The type of the object. Always `text`.
+       */
+      type: 'reasoning_summary';
+    }
+  }
+}
+
+/**
+ * Emitted when a new output item is added.
+ */
+export interface ResponseOutputItemAddedEvent {
+  /**
+   * The output item that was added.
+   */
+  item: ResponseOutputItem;
+
+  /**
+   * The index of the output item that was added.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.output_item.added`.
+   */
+  type: 'response.output_item.added';
+}
+
+/**
+ * Emitted when an output item is marked done.
+ */
+export interface ResponseOutputItemDoneEvent {
+  /**
+   * The output item that was marked done.
+   */
+  item: ResponseOutputItem;
+
+  /**
+   * The index of the output item that was marked done.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.output_item.done`.
+   */
+  type: 'response.output_item.done';
+}
+
+/**
+ * An output message from the model.
+ */
+export interface ResponseOutputMessage {
+  /**
+   * The unique ID of the output message.
+   */
+  id: string;
+
+  /**
+   * The content of the output message.
+   */
+  content: Array<ResponseOutputText | ResponseOutputRefusal>;
+
+  /**
+   * The role of the output message. Always `assistant`.
+   */
+  role: 'assistant';
+
+  /**
+   * The status of the message input. One of `in_progress`, `completed`, or
+   * `incomplete`. Populated when input items are returned via API.
+   */
+  status: 'in_progress' | 'completed' | 'incomplete';
+
+  /**
+   * The type of the output message. Always `message`.
+   */
+  type: 'message';
+}
+
+/**
+ * A refusal from the model.
+ */
+export interface ResponseOutputRefusal {
+  /**
+   * The refusal explanation from the model.
+   */
+  refusal: string;
+
+  /**
+   * The type of the refusal. Always `refusal`.
+   */
+  type: 'refusal';
+}
+
+/**
+ * A text output from the model.
+ */
+export interface ResponseOutputText {
+  /**
+   * The annotations of the text output.
+   */
+  annotations: Array<
+    ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath
+  >;
+
+  /**
+   * The text output from the model.
+   */
+  text: string;
+
+  /**
+   * The type of the output text. Always `output_text`.
+   */
+  type: 'output_text';
+}
+
+export namespace ResponseOutputText {
+  /**
+   * A citation to a file.
+   */
+  export interface FileCitation {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+ */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Emitted when there is a partial refusal text. + */ +export interface ResponseRefusalDeltaEvent { + /** + * The index of the content part that the refusal text is added to. + */ + content_index: number; + + /** + * The refusal text that is added. + */ + delta: string; + + /** + * The ID of the output item that the refusal text is added to. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.refusal.delta`. + */ + type: 'response.refusal.delta'; +} + +/** + * Emitted when refusal text is finalized. + */ +export interface ResponseRefusalDoneEvent { + /** + * The index of the content part that the refusal text is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the refusal text is finalized. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is finalized. 
+ */ + output_index: number; + + /** + * The refusal text that is finalized. + */ + refusal: string; + + /** + * The type of the event. Always `response.refusal.done`. + */ + type: 'response.refusal.done'; +} + +/** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ +export type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'incomplete'; + +/** + * Emitted when there is a partial audio response. + */ +export type ResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; + +/** + * Emitted when a text annotation is added. + */ +export interface ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. 
+ */ + annotation: + | ResponseTextAnnotationDeltaEvent.FileCitation + | ResponseTextAnnotationDeltaEvent.URLCitation + | ResponseTextAnnotationDeltaEvent.FilePath; + + /** + * The index of the annotation that was added. + */ + annotation_index: number; + + /** + * The index of the content part that the text annotation was added to. + */ + content_index: number; + + /** + * The ID of the output item that the text annotation was added to. + */ + item_id: string; + + /** + * The index of the output item that the text annotation was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.annotation.added`. + */ + type: 'response.output_text.annotation.added'; +} + +export namespace ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Configuration options for a text response from the model. 
Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ +export interface ResponseTextConfig { + /** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + format?: ResponseFormatTextConfig; +} + +/** + * Emitted when there is an additional text delta. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part that the text delta was added to. + */ + content_index: number; + + /** + * The text delta that was added. + */ + delta: string; + + /** + * The ID of the output item that the text delta was added to. + */ + item_id: string; + + /** + * The index of the output item that the text delta was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.delta`. + */ + type: 'response.output_text.delta'; +} + +/** + * Emitted when text content is finalized. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part that the text content is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the text content is finalized. + */ + item_id: string; + + /** + * The index of the output item that the text content is finalized. 
+ */ + output_index: number; + + /** + * The text content that is finalized. + */ + text: string; + + /** + * The type of the event. Always `response.output_text.done`. + */ + type: 'response.output_text.done'; +} + +/** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ +export interface ResponseUsage { + /** + * The number of input tokens. + */ + input_tokens: number; + + /** + * The number of output tokens. + */ + output_tokens: number; + + /** + * A detailed breakdown of the output tokens. + */ + output_tokens_details: ResponseUsage.OutputTokensDetails; + + /** + * The total number of tokens used. + */ + total_tokens: number; +} + +export namespace ResponseUsage { + /** + * A detailed breakdown of the output tokens. + */ + export interface OutputTokensDetails { + /** + * The number of reasoning tokens. + */ + reasoning_tokens: number; + } +} + +/** + * Emitted when a web search call is completed. + */ +export interface ResponseWebSearchCallCompletedEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.completed`. + */ + type: 'response.web_search_call.completed'; +} + +/** + * Emitted when a web search call is initiated. + */ +export interface ResponseWebSearchCallInProgressEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.in_progress`. + */ + type: 'response.web_search_call.in_progress'; +} + +/** + * Emitted when a web search call is executing. 
+ */ +export interface ResponseWebSearchCallSearchingEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.searching`. + */ + type: 'response.web_search_call.searching'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool; + +/** + * Use this option to force the model to call a specific function. + */ +export interface ToolChoiceFunction { + /** + * The name of the function to call. + */ + name: string; + + /** + * For function calling, the type is always `function`. + */ + type: 'function'; +} + +/** + * Controls which (if any) tool is called by the model. + * + * `none` means the model will not call any tool and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling one or + * more tools. + * + * `required` means the model must call one or more tools. + */ +export type ToolChoiceOptions = 'none' | 'auto' | 'required'; + +/** + * Indicates that the model should use a built-in tool to generate a response. + * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + */ +export interface ToolChoiceTypes { + /** + * The type of hosted tool the model should to use. Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). 
+ * + * Allowed values are: + * + * - `file_search` + * - `web_search_preview` + * - `computer_use_preview` + */ + type: 'file_search' | 'web_search_preview' | 'computer_use_preview' | 'web_search_preview_2025_03_11'; +} + +/** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + */ +export interface WebSearchTool { + /** + * The type of the web search tool. One of: + * + * - `web_search_preview` + * - `web_search_preview_2025_03_11` + */ + type: 'web_search_preview' | 'web_search_preview_2025_03_11'; + + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + user_location?: WebSearchTool.UserLocation | null; +} + +export namespace WebSearchTool { + export interface UserLocation { + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } +} + +export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming; + +export interface ResponseCreateParamsBase { + /** + * Text, image, or file inputs to the model, used to generate a response. 
+ * + * Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Image inputs](https://platform.openai.com/docs/guides/images) + * - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + * - [Function calling](https://platform.openai.com/docs/guides/function-calling) + */ + input: string | ResponseInput; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * Specify additional output data to include in the model response. Currently + * supported values are: + * + * - `file_search_call.results`: Include the search results of the file search tool + * call. + * - `message.input_image.image_url`: Include image urls from the input message. + * - `computer_call_output.output.image_url`: Include image urls from the computer + * call output. + */ + include?: Array | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will be not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions?: string | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls?: boolean | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * Whether to store the generated model response for later retrieval via API. + */ + store?: boolean | null; + + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: boolean | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. 
Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. 
+ * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. + */ + truncation?: 'auto' | 'disabled' | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace ResponseCreateParams { + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: false | null; +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream: true; +} + +export interface ResponseRetrieveParams { + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. 
+ */ + include?: Array; +} + +Responses.InputItems = InputItems; +Responses.ResponseItemListDataPage = ResponseItemListDataPage; + +export declare namespace Responses { + export { + InputItems as InputItems, + type ResponseItemList as ResponseItemList, + ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 3bb11582f..86b2d2dee 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,96 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +export type ChatModel = + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'o1-preview' + | 'o1-preview-2024-09-12' + | 'o1-mini' + | 'o1-mini-2024-09-12' + | 'computer-use-preview' + | 'computer-use-preview-2025-02-04' + | 'computer-use-preview-2025-03-11' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'chatgpt-4o-latest' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0301' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; + +/** + * A filter used to compare a specified attribute key to a given value using a + * defined comparison operation. + */ +export interface ComparisonFilter { + /** + * The key to compare against the value. 
+ */ + key: string; + + /** + * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + * + * - `eq`: equals + * - `ne`: not equal + * - `gt`: greater than + * - `gte`: greater than or equal + * - `lt`: less than + * - `lte`: less than or equal + */ + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + + /** + * The value to compare against the attribute key; supports string, number, or + * boolean types. + */ + value: string | number | boolean; +} + +/** + * Combine multiple filters using `and` or `or`. + */ +export interface CompoundFilter { + /** + * Array of filters to combine. Items can be `ComparisonFilter` or + * `CompoundFilter`. + */ + filters: Array; + + /** + * Type of operation: `and` or `or`. + */ + type: 'and' | 'or'; +} + export interface ErrorObject { code: string | null; @@ -65,23 +156,76 @@ export type FunctionParameters = Record; */ export type Metadata = Record; +/** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ +export interface Reasoning { + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + effort: ReasoningEffort | null; + + /** + * **o-series models only** + * + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `concise` or + * `detailed`. + */ + generate_summary?: 'concise' | 'detailed' | null; +} + +/** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ReasoningEffort = 'low' | 'medium' | 'high' | null; + +/** + * JSON object response format. An older method of generating JSON responses. Using + * `json_schema` is recommended for models that support it. Note that the model + * will not generate JSON without a system or user message instructing it to do so. + */ export interface ResponseFormatJSONObject { /** - * The type of response format being defined: `json_object` + * The type of response format being defined. Always `json_object`. */ type: 'json_object'; } +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ export interface ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ json_schema: ResponseFormatJSONSchema.JSONSchema; /** - * The type of response format being defined: `json_schema` + * The type of response format being defined. Always `json_schema`. */ type: 'json_schema'; } export namespace ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ export interface JSONSchema { /** * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores @@ -96,7 +240,8 @@ export namespace ResponseFormatJSONSchema { description?: string; /** - * The schema for the response format, described as a JSON Schema object. + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). */ schema?: Record; @@ -111,9 +256,12 @@ export namespace ResponseFormatJSONSchema { } } +/** + * Default response format. Used to generate text responses. 
+ */ export interface ResponseFormatText { /** - * The type of response format being defined: `text` + * The type of response format being defined. Always `text`. */ type: 'text'; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index f977e18f6..9e046b48d 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -22,10 +22,9 @@ export class Uploads extends APIResource { * contains all the parts you uploaded. This File is usable in the rest of our * platform as a regular File object. * - * For certain `purpose`s, the correct `mime_type` must be specified. Please refer - * to documentation for the supported MIME types for your use case: - * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + * For certain `purpose` values, the correct `mime_type` must be specified. Please + * refer to documentation for the + * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts similarity index 92% rename from src/resources/beta/vector-stores/file-batches.ts rename to src/resources/vector-stores/file-batches.ts index 2c47cb9c2..9be1d81a3 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import { sleep } from '../../../core'; -import { Uploadable } from '../../../core'; -import { allSettledWithThrow } from '../../../lib/Util'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { sleep } from '../../core'; +import { Uploadable } from '../../core'; +import { allSettledWithThrow } from '../../lib/Util'; +import * as Core from '../../core'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { type CursorPageParams } from '../../../pagination'; +import { type CursorPageParams } from '../../pagination'; export class FileBatches extends APIResource { /** @@ -265,6 +265,15 @@ export interface FileBatchCreateParams { */ file_ids: Array; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/vector-stores/files.ts similarity index 74% rename from src/resources/beta/vector-stores/files.ts rename to src/resources/vector-stores/files.ts index 1fda9a99b..28caf9781 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import { sleep, Uploadable, isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { sleep, Uploadable, isRequestOptions } from '../../core'; +import * as Core from '../../core'; import * as VectorStoresAPI from './vector-stores'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class Files extends APIResource { /** @@ -38,6 +38,22 @@ export class Files extends APIResource { }); } + /** + * Update attributes on a vector store file. + */ + update( + vectorStoreId: string, + fileId: string, + body: FileUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + /** * Returns a list of vector store files. */ @@ -167,10 +183,30 @@ export class Files extends APIResource { const fileInfo = await this.upload(vectorStoreId, file, options); return await this.poll(vectorStoreId, fileInfo.id, options); } + + /** + * Retrieve the parsed contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList( + `/vector_stores/${vectorStoreId}/files/${fileId}/content`, + FileContentResponsesPage, + { ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } }, + ); + } } export class VectorStoreFilesPage extends CursorPage {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class FileContentResponsesPage extends Page {} + /** * A list of files attached to a vector store. 
*/ @@ -217,6 +253,15 @@ export interface VectorStoreFile { */ vector_store_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The strategy used to chunk the file. */ @@ -249,6 +294,18 @@ export interface VectorStoreFileDeleted { object: 'vector_store.file.deleted'; } +export interface FileContentResponse { + /** + * The text content + */ + text?: string; + + /** + * The content type (currently only `"text"`) + */ + type?: string; +} + export interface FileCreateParams { /** * A [File](https://platform.openai.com/docs/api-reference/files) ID that the @@ -257,6 +314,15 @@ export interface FileCreateParams { */ file_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. @@ -264,6 +330,17 @@ export interface FileCreateParams { chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; } +export interface FileUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record | null; +} + export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -286,13 +363,17 @@ export interface FileListParams extends CursorPageParams { } Files.VectorStoreFilesPage = VectorStoreFilesPage; +Files.FileContentResponsesPage = FileContentResponsesPage; export declare namespace Files { export { type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/vector-stores/index.ts similarity index 82% rename from src/resources/beta/vector-stores/index.ts rename to src/resources/vector-stores/index.ts index d587bd160..9cbcbc0b2 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -8,14 +8,18 @@ export { } from './file-batches'; export { VectorStoreFilesPage, + FileContentResponsesPage, Files, type VectorStoreFile, type VectorStoreFileDeleted, + type FileContentResponse, type FileCreateParams, + type FileUpdateParams, type FileListParams, } from './files'; export { VectorStoresPage, + VectorStoreSearchResponsesPage, VectorStores, type AutoFileChunkingStrategyParam, type FileChunkingStrategy, @@ -26,7 +30,9 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, type VectorStoreListParams, + type VectorStoreSearchParams, } from 
'./vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts similarity index 77% rename from src/resources/beta/vector-stores/vector-stores.ts rename to src/resources/vector-stores/vector-stores.ts index 8438b79da..7d61e7fd6 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; -import * as Shared from '../../shared'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCreateParams, @@ -13,14 +13,17 @@ import { } from './file-batches'; import * as FilesAPI from './files'; import { + FileContentResponse, + FileContentResponsesPage, FileCreateParams, FileListParams, + FileUpdateParams, Files, VectorStoreFile, VectorStoreFileDeleted, VectorStoreFilesPage, } from './files'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); @@ -93,10 +96,32 @@ export class VectorStores extends APIResource { headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } + + /** + * Search a vector store for relevant chunks based on a query and file attributes + * filter. 
+ */ + search( + vectorStoreId: string, + body: VectorStoreSearchParams, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList(`/vector_stores/${vectorStoreId}/search`, VectorStoreSearchResponsesPage, { + body, + method: 'post', + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } } export class VectorStoresPage extends CursorPage {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class VectorStoreSearchResponsesPage extends Page {} + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of * `800` and `chunk_overlap_tokens` of `400`. @@ -155,6 +180,9 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } +/** + * Customize your own chunking strategy by setting chunk size and chunk overlap. + */ export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; @@ -282,6 +310,51 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +export interface VectorStoreSearchResponse { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record | null; + + /** + * Content chunks from the file. + */ + content: Array; + + /** + * The ID of the vector store file. + */ + file_id: string; + + /** + * The name of the vector store file. + */ + filename: string; + + /** + * The similarity score for the result. + */ + score: number; +} + +export namespace VectorStoreSearchResponse { + export interface Content { + /** + * The text content returned from search. + */ + text: string; + + /** + * The type of content. 
+ */ + type: 'text'; + } +} + export interface VectorStoreCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` @@ -391,9 +464,50 @@ export interface VectorStoreListParams extends CursorPageParams { order?: 'asc' | 'desc'; } +export interface VectorStoreSearchParams { + /** + * A query string for a search + */ + query: string | Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: VectorStoreSearchParams.RankingOptions; + + /** + * Whether to rewrite the natural language query for vector search. + */ + rewrite_query?: boolean; +} + +export namespace VectorStoreSearchParams { + /** + * Ranking options for search. + */ + export interface RankingOptions { + ranker?: 'auto' | 'default-2024-11-15'; + + score_threshold?: number; + } +} + VectorStores.VectorStoresPage = VectorStoresPage; +VectorStores.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; VectorStores.Files = Files; VectorStores.VectorStoreFilesPage = VectorStoreFilesPage; +VectorStores.FileContentResponsesPage = FileContentResponsesPage; VectorStores.FileBatches = FileBatches; export declare namespace VectorStores { @@ -407,18 +521,24 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, type VectorStoreCreateParams as VectorStoreCreateParams, type VectorStoreUpdateParams as VectorStoreUpdateParams, type VectorStoreListParams as VectorStoreListParams, + 
type VectorStoreSearchParams as VectorStoreSearchParams, }; export { Files as Files, type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; diff --git a/src/streaming.ts b/src/streaming.ts index 52266154c..25b960314 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -41,7 +41,7 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null) { + if (sse.event === null || sse.event.startsWith('response.')) { let data; try { diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index acdd631db..eddf252b1 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -43,9 +43,9 @@ describe('resource completions', () => { presence_penalty: -2, reasoning_effort: 'low', response_format: { type: 'text' }, - seed: 0, + seed: -9007199254740991, service_tier: 'auto', - stop: 'string', + stop: '\n', store: true, stream: false, stream_options: { include_usage: true }, @@ -60,6 +60,13 @@ describe('resource completions', () => { top_logprobs: 0, top_p: 1, user: 'user-1234', + web_search_options: { + search_context_size: 'low', + user_location: { + approximate: { city: 'city', country: 'country', region: 'region', timezone: 'timezone' }, + type: 'approximate', + }, + }, }); }); diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 000000000..51b86f1b3 --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/responses/responses.test.ts similarity index 58% rename from tests/api-resources/beta/vector-stores/files.test.ts rename to tests/api-resources/responses/responses.test.ts index 7c14d4de3..e10722738 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -8,9 +8,9 @@ const client = new 
OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); -describe('resource files', () => { +describe('resource responses', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.responses.create({ input: 'string', model: 'gpt-4o' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,14 +21,38 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.files.create('vs_abc123', { - file_id: 'file_id', - chunking_strategy: { type: 'auto' }, + const response = await client.responses.create({ + input: 'string', + model: 'gpt-4o', + include: ['file_search_call.results'], + instructions: 'instructions', + max_output_tokens: 0, + metadata: { foo: 'string' }, + parallel_tool_calls: true, + previous_response_id: 'previous_response_id', + reasoning: { effort: 'low', generate_summary: 'concise' }, + store: true, + stream: false, + temperature: 1, + text: { format: { type: 'text' } }, + tool_choice: 'none', + tools: [ + { + type: 'file_search', + vector_store_ids: ['string'], + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 0, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + }, + ], + top_p: 1, + truncation: 'auto', + user: 'user-1234', }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const responsePromise = client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -41,43 +65,25 @@ describe('resource files', () => { test('retrieve: request 
options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { + client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); - test('list', async () => { - const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); - }); - - test('list: request options and params are passed correctly', async () => { + test('retrieve: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.list( - 'vector_store_id', - { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + client.responses.retrieve( + 'resp_677efb5139a88190b512bc3fef8e535d', + { include: ['file_search_call.results'] }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = 
client.beta.vectorStores.files.del('vector_store_id', 'file_id'); + const responsePromise = client.responses.del('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +96,7 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + client.responses.del('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts similarity index 81% rename from tests/api-resources/beta/vector-stores/file-batches.test.ts rename to tests/api-resources/vector-stores/file-batches.test.ts index b714049b4..c0447a838 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -10,9 +10,7 @@ const client = new OpenAI({ describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { - file_ids: ['string'], - }); + const responsePromise = client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +21,15 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = 
await client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const responsePromise = client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +42,14 @@ describe('resource fileBatches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -63,14 +62,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = 
client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -83,7 +82,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -92,7 +91,7 @@ describe('resource fileBatches', () => { test('listFiles: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles( + client.vectorStores.fileBatches.listFiles( 'vector_store_id', 'batch_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts new file mode 100644 index 000000000..86a8f9bb4 --- /dev/null +++ b/tests/api-resources/vector-stores/files.test.ts @@ -0,0 +1,132 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.files.create('vs_abc123', { + file_id: 'file_id', + attributes: { foo: 'string' }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); 
+ const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.files.list('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list( + 'vector_store_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.vectorStores.files.del('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await 
responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts similarity index 71% rename from tests/api-resources/beta/vector-stores/vector-stores.test.ts rename to tests/api-resources/vector-stores/vector-stores.test.ts index 806098de8..465904a00 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ 
describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = client.beta.vectorStores.create({}); + const responsePromise = client.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + client.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = client.beta.vectorStores.update('vector_store_id', {}); + const responsePromise = client.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -50,7 +50,7 @@ describe('resource vectorStores', () => { }); test('list', async () => { - const responsePromise = client.beta.vectorStores.list(); + const responsePromise = client.vectorStores.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -62,7 +62,7 @@ describe('resource vectorStores', () => { test('list: request options 
instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -70,7 +70,7 @@ describe('resource vectorStores', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.list( + client.vectorStores.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -78,7 +78,7 @@ describe('resource vectorStores', () => { }); test('del', async () => { - const responsePromise = client.beta.vectorStores.del('vector_store_id'); + const responsePromise = client.vectorStores.del('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -91,7 +91,28 @@ describe('resource vectorStores', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), + client.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('search: only required params', async () => { + const responsePromise = client.vectorStores.search('vs_abc123', { query: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + 
expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('search: required and optional params', async () => { + const response = await client.vectorStores.search('vs_abc123', { + query: 'string', + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 1, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + rewrite_query: true, + }); + }); }); From b0930694021fb07c03782387cf3ba9d8df6fb975 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:32:03 +0000 Subject: [PATCH 148/246] release: 4.87.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a889d24b4..e8984a56c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.2" + ".": "4.87.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 38d54fdc1..2ec7edb2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.0 (2025-03-11) + +Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0) + +### Features + +* **api:** add /v1/responses and built-in tools ([119b584](https://github.com/openai/openai-node/commit/119b5843a18b8014167c8d2031d75c08dbf400a3)) + ## 4.86.2 (2025-03-05) Full Changelog: [v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2) diff --git a/jsr.json b/jsr.json index 1c0948aaa..4ac1601e7 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.2", + "version": "4.87.0", "exports": { ".": "./index.ts", "./helpers/zod": 
"./helpers/zod.ts", diff --git a/package.json b/package.json index 78afb8946..7cf4a385d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.2", + "version": "4.87.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c43a3c320..2b1fd6541 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.86.2'; // x-release-please-version +export const VERSION = '4.87.0'; // x-release-please-version From 21f210782b1ee3b33231cfed0277ab8e3a764bcb Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 12:42:29 -0400 Subject: [PATCH 149/246] fix: correct imports --- src/lib/ResponsesParser.ts | 2 +- src/lib/responses/ResponseStream.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts index 780b779ff..8d762d5bb 100644 --- a/src/lib/ResponsesParser.ts +++ b/src/lib/ResponsesParser.ts @@ -1,5 +1,5 @@ import { OpenAIError } from '../error'; -import { type ChatCompletionTool } from '../resources'; +import type { ChatCompletionTool } from '../resources/chat/completions'; import { type FunctionTool, type ParsedContent, diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts index 0d6cd47dd..d2ee80a75 100644 --- a/src/lib/responses/ResponseStream.ts +++ b/src/lib/responses/ResponseStream.ts @@ -4,7 +4,7 @@ import { type ResponseCreateParamsBase, type ResponseCreateParamsStreaming, type ResponseStreamEvent, -} from 'openai/resources/responses/responses'; +} from '../../resources/responses/responses'; import * as Core from '../../core'; import { APIUserAbortError, OpenAIError } from '../../error'; import OpenAI from '../../index'; From 0bc08d15143f63536b2331a174bec3d0411a7356 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:45:57 +0000 Subject: [PATCH 150/246] release: 4.87.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8984a56c..dab137bc4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.0" + ".": "4.87.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ec7edb2b..46477c290 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.1 (2025-03-11) + +Full Changelog: [v4.87.0...v4.87.1](https://github.com/openai/openai-node/compare/v4.87.0...v4.87.1) + +### Bug Fixes + +* correct imports ([5cdf17c](https://github.com/openai/openai-node/commit/5cdf17cec33da7cf540b8bdbcfa30c0c52842dd1)) + ## 4.87.0 (2025-03-11) Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0) diff --git a/jsr.json b/jsr.json index 4ac1601e7..beb9f5c47 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.0", + "version": "4.87.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 7cf4a385d..386015e42 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.0", + "version": "4.87.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 2b1fd6541..35d1d1c0f 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.0'; // x-release-please-version +export const VERSION = '4.87.1'; // x-release-please-version From 8ae07cc036895529a028134451fe2ab5c1661871 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: 
Tue, 11 Mar 2025 13:55:38 -0400 Subject: [PATCH 151/246] fix(responses): correctly add output_text --- src/resources/responses/responses.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 2ad146873..060147a2b 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -79,7 +79,7 @@ export class Responses extends APIResource { | APIPromise | APIPromise> )._thenUnwrap((rsp) => { - if ('type' in rsp && rsp.type === 'response') { + if ('object' in rsp && rsp.object === 'response') { addOutputText(rsp as Response); } From 0b33959ed9d911c73b7ea4935761c702266bec6c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 17:56:33 +0000 Subject: [PATCH 152/246] release: 4.87.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dab137bc4..464f20492 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.1" + ".": "4.87.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 46477c290..4a4cb5036 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.2 (2025-03-11) + +Full Changelog: [v4.87.1...v4.87.2](https://github.com/openai/openai-node/compare/v4.87.1...v4.87.2) + +### Bug Fixes + +* **responses:** correctly add output_text ([4ceb5cc](https://github.com/openai/openai-node/commit/4ceb5cc516b8c75d46f0042534d7658796a8cd71)) + ## 4.87.1 (2025-03-11) Full Changelog: [v4.87.0...v4.87.1](https://github.com/openai/openai-node/compare/v4.87.0...v4.87.1) diff --git a/jsr.json b/jsr.json index beb9f5c47..b6857cfb0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - 
"version": "4.87.1", + "version": "4.87.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 386015e42..2fbc060df 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.1", + "version": "4.87.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 35d1d1c0f..854c6827d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.1'; // x-release-please-version +export const VERSION = '4.87.2'; // x-release-please-version From 9cb95763cab5678c5098b37ad0fe1ec83d2c1cb7 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 17:36:01 -0400 Subject: [PATCH 153/246] fix(responses): correct reasoning output type --- api.md | 1 + src/resources/responses/responses.ts | 131 +++++++++------------------ 2 files changed, 45 insertions(+), 87 deletions(-) diff --git a/api.md b/api.md index b21ac2d5f..2fac07f38 100644 --- a/api.md +++ b/api.md @@ -583,6 +583,7 @@ Types: - ResponseOutputMessage - ResponseOutputRefusal - ResponseOutputText +- ResponseReasoningItem - ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 060147a2b..72adf0696 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -36,7 +36,7 @@ export type ParsedResponseOutputItem = | ResponseFileSearchToolCall | ResponseFunctionWebSearch | ResponseComputerToolCall - | ResponseOutputItem.Reasoning; + | ResponseReasoningItem; export interface ParsedResponse extends Response { output: Array>; @@ -1562,7 +1562,7 @@ export type ResponseInputItem = | ResponseFunctionWebSearch | ResponseFunctionToolCall | ResponseInputItem.FunctionCallOutput - | ResponseInputItem.Reasoning + | ResponseReasoningItem | 
ResponseInputItem.ItemReference; export namespace ResponseInputItem { @@ -1707,47 +1707,6 @@ export namespace ResponseInputItem { status?: 'in_progress' | 'completed' | 'incomplete'; } - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. - */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } - /** * An internal identifier for an item to reference. */ @@ -1814,50 +1773,7 @@ export type ResponseOutputItem = | ResponseFunctionToolCall | ResponseFunctionWebSearch | ResponseComputerToolCall - | ResponseOutputItem.Reasoning; - -export namespace ResponseOutputItem { - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. 
- */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } -} + | ResponseReasoningItem; /** * Emitted when a new output item is added. @@ -2039,6 +1955,47 @@ export namespace ResponseOutputText { } } +/** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ +export interface ResponseReasoningItem { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + summary: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseReasoningItem { + export interface Summary { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `summary_text`. + */ + type: 'summary_text'; + } +} + /** * Emitted when there is a partial refusal text. 
*/ From be2414ce22517e3259192c751de55744649deec1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 21:40:08 +0000 Subject: [PATCH 154/246] release: 4.87.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 464f20492..0c7a85094 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.2" + ".": "4.87.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a4cb5036..46a595495 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.3 (2025-03-11) + +Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3) + +### Bug Fixes + +* **responses:** correct reasoning output type ([2abef57](https://github.com/openai/openai-node/commit/2abef57d7645a96a4b9a6b91483861cd568d2d4d)) + ## 4.87.2 (2025-03-11) Full Changelog: [v4.87.1...v4.87.2](https://github.com/openai/openai-node/compare/v4.87.1...v4.87.2) diff --git a/jsr.json b/jsr.json index b6857cfb0..1051fade0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.2", + "version": "4.87.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 2fbc060df..9967a814d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.2", + "version": "4.87.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 854c6827d..e84192528 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.2'; // x-release-please-version +export const VERSION = 
'4.87.3'; // x-release-please-version From e905c95a27213ee65210b061ead4c982de01648b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 19:24:51 +0000 Subject: [PATCH 155/246] chore(internal): remove CI condition (#1381) --- .github/workflows/ci.yml | 5 ++- .github/workflows/create-releases.yml | 50 --------------------------- .github/workflows/publish-jsr.yml | 8 +++-- .github/workflows/publish-npm.yml | 8 +++-- .github/workflows/release-doctor.yml | 1 - .stats.yml | 2 +- bin/check-release-environment | 4 --- examples/yarn.lock | 0 8 files changed, 15 insertions(+), 63 deletions(-) delete mode 100644 .github/workflows/create-releases.yml delete mode 100644 examples/yarn.lock diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fe24c0dcb..3efb3f17a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: lint: name: lint runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' + steps: - uses: actions/checkout@v4 @@ -31,7 +31,7 @@ jobs: build: name: build runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' + steps: - uses: actions/checkout@v4 @@ -49,7 +49,6 @@ jobs: test: name: test runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index 19b7dd831..000000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - master - -jobs: - release: - name: release - if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' - runs-on: ubuntu-latest - environment: publish - permissions: - contents: read - id-token: write - - steps: - - uses: actions/checkout@v4 - - - uses: 
stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Set up Node - if: ${{ steps.release.outputs.releases_created }} - uses: actions/setup-node@v3 - with: - node-version: '18' - - - name: Install dependencies - if: ${{ steps.release.outputs.releases_created }} - run: | - yarn install - - - name: Publish to NPM - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-npm - env: - NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} - - - name: Publish to JSR - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-jsr - diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index 1e46d6bfb..dc5fe0a2a 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to JSR in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to JSR in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml name: Publish JSR on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..d3b184555 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to NPM in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml +# This workflow is triggered when a GitHub release is created. 
+# It can also be run manually to re-publish to NPM in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml name: Publish NPM on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 37bc09e80..754a44931 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,6 +19,5 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} diff --git a/.stats.yml b/.stats.yml index 455874212..53c73037d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml diff --git a/bin/check-release-environment b/bin/check-release-environment index dbfd546bf..e51564b7d 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${NPM_TOKEN}" ]; then errors+=("The OPENAI_NPM_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets") fi diff --git a/examples/yarn.lock b/examples/yarn.lock deleted file mode 100644 index e69de29bb..000000000 From 2cbf49a0b9a8cfbee29cec558c5ccdcebd72396f Mon Sep 17 00:00:00 2001 From: meorphis Date: Fri, 14 Mar 2025 16:53:35 -0400 Subject: [PATCH 156/246] chore(internal): update release workflows --- .github/workflows/publish-jsr.yml | 8 ++------ .github/workflows/publish-npm.yml | 8 ++------ .github/workflows/release-doctor.yml | 1 + 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index dc5fe0a2a..1e46d6bfb 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to JSR in case it failed for some reason. -# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +# workflow for re-running publishing to JSR in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml name: Publish JSR on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index d3b184555..5a3711b53 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to NPM in case it failed for some reason. 
-# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml +# workflow for re-running publishing to NPM in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml name: Publish NPM on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 754a44931..37bc09e80 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,5 +19,6 @@ jobs: run: | bash ./bin/check-release-environment env: + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} From f4647cc7546d06145bf34113be22aabbd1b7e7ee Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:56:15 +0000 Subject: [PATCH 157/246] chore: add missing type alias exports (#1390) --- src/index.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/index.ts b/src/index.ts index c3abed2db..34cc3e84d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -118,6 +118,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -129,6 +130,7 @@ import { ChatCompletionUpdateParams, ChatCompletionUserMessageParam, ChatCompletionsPage, + CreateChatCompletionRequestMessage, } from './resources/chat/completions/completions'; export interface ClientOptions { @@ -404,6 +406,8 @@ export declare namespace OpenAI { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type 
CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, From 9c45ef37249e7db3ba8aa2e81886ffe306b95da4 Mon Sep 17 00:00:00 2001 From: meorphis <108296353+meorphis@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:33:27 -0400 Subject: [PATCH 158/246] chore(internal): run CI on update-specs branch --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3efb3f17a..627f5954f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,7 @@ on: push: branches: - master + - update-specs pull_request: branches: - master @@ -87,7 +88,6 @@ jobs: ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' timeout-minutes: 20 strategy: fail-fast: false From e983d0c61d33b106f149d87eed90378bd0bbc349 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:54:03 +0000 Subject: [PATCH 159/246] fix(api): correct some Responses types (#1391) --- .stats.yml | 2 +- src/resources/batches.ts | 8 +++--- src/resources/chat/completions/completions.ts | 18 ++++++++----- src/resources/responses/responses.ts | 26 +++++++++++++++---- src/resources/shared.ts | 4 +-- tests/api-resources/batches.test.ts | 4 +-- 6 files changed, 42 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index 53c73037d..1e04d7c26 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index aadda83a6..2cf2ac566 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -220,11 +220,11 @@ export interface BatchCreateParams { /** * The endpoint to be used for all requests in the batch. Currently - * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - * embedding inputs across all requests in the batch. + * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + * are supported. Note that `/v1/embeddings` batches are also restricted to a + * maximum of 50,000 embedding inputs across all requests in the batch. */ - endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; + endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; /** * The ID of an uploaded file that contains requests for the new batch. diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 7b1c353e2..f54c01597 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -377,10 +377,13 @@ export interface ChatCompletionChunk { /** * An optional field that will only be present when you set * `stream_options: {"include_usage": true}` in your request. When present, it - * contains a null value except for the last chunk which contains the token usage - * statistics for the entire request. + * contains a null value **except for the last chunk** which contains the token + * usage statistics for the entire request. 
+ * + * **NOTE:** If the stream is interrupted or cancelled, you may not receive the + * final usage chunk which contains the total token usage for the request. */ - usage?: CompletionsAPI.CompletionUsage | null; + usage?: CompletionsAPI.CompletionUsage; } export namespace ChatCompletionChunk { @@ -551,7 +554,7 @@ export namespace ChatCompletionContentPart { /** * The name of the file, used when passing the file to the model as a string. */ - file_name?: string; + filename?: string; } } } @@ -930,8 +933,11 @@ export interface ChatCompletionStreamOptions { /** * If set, an additional chunk will be streamed before the `data: [DONE]` message. * The `usage` field on this chunk shows the token usage statistics for the entire - * request, and the `choices` field will always be an empty array. All other chunks - * will also include a `usage` field, but with a null value. + * request, and the `choices` field will always be an empty array. + * + * All other chunks will also include a `usage` field, but with a null value. + * **NOTE:** If the stream is interrupted, you may not receive the final usage + * chunk which contains the total token usage for the request. */ include_usage?: boolean; } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 72adf0696..20d67b8ac 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -1362,11 +1362,6 @@ export interface ResponseFunctionCallArgumentsDoneEvent { * for more information. */ export interface ResponseFunctionToolCall { - /** - * The unique ID of the function tool call. - */ - id: string; - /** * A JSON string of the arguments to pass to the function. */ @@ -1387,6 +1382,11 @@ export interface ResponseFunctionToolCall { */ type: 'function_call'; + /** + * The unique ID of the function tool call. + */ + id?: string; + /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. 
@@ -2305,6 +2305,11 @@ export interface ResponseUsage { */ input_tokens: number; + /** + * A detailed breakdown of the input tokens. + */ + input_tokens_details: ResponseUsage.InputTokensDetails; + /** * The number of output tokens. */ @@ -2322,6 +2327,17 @@ export interface ResponseUsage { } export namespace ResponseUsage { + /** + * A detailed breakdown of the input tokens. + */ + export interface InputTokensDetails { + /** + * The number of tokens that were retrieved from the cache. + * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + */ + cached_tokens: number; + } + /** * A detailed breakdown of the output tokens. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 86b2d2dee..5fbdbba6a 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -171,10 +171,10 @@ export interface Reasoning { * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - effort: ReasoningEffort | null; + effort?: ReasoningEffort | null; /** - * **o-series models only** + * **computer_use_preview only** * * A summary of the reasoning performed by the model. This can be useful for * debugging and understanding the model's reasoning process. 
One of `concise` or diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 96e200fb9..7c7397d06 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -12,7 +12,7 @@ describe('resource batches', () => { test('create: only required params', async () => { const responsePromise = client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', }); const rawResponse = await responsePromise.asResponse(); @@ -27,7 +27,7 @@ describe('resource batches', () => { test('create: required and optional params', async () => { const response = await client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', metadata: { foo: 'string' }, }); From ca6266eea5229056a3bc2b5e4225b9ea9eaa459e Mon Sep 17 00:00:00 2001 From: meorphis Date: Tue, 18 Mar 2025 14:08:33 -0400 Subject: [PATCH 160/246] chore(internal): add back release workflow --- .github/workflows/create-releases.yml | 50 +++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 000000000..19b7dd831 --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,50 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - master + +jobs: + release: + name: release + if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' + runs-on: ubuntu-latest + environment: publish + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY 
}} + + - name: Set up Node + if: ${{ steps.release.outputs.releases_created }} + uses: actions/setup-node@v3 + with: + node-version: '18' + + - name: Install dependencies + if: ${{ steps.release.outputs.releases_created }} + run: | + yarn install + + - name: Publish to NPM + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-npm + env: + NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} + + - name: Publish to JSR + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-jsr + From d2be74a28dec48cd7d88db88af95e8bc608cdede Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 18 Mar 2025 18:52:33 +0000 Subject: [PATCH 161/246] fix(types): ignore missing `id` in responses pagination --- src/resources/responses/input-items.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 9704be89a..d622b8e58 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -65,6 +65,7 @@ export class InputItems extends APIResource { } export class ResponseItemListDataPage extends CursorPage< + // @ts-ignore some items don't necessarily have the `id` property | ResponseItemList.Message | ResponsesAPI.ResponseOutputMessage | ResponsesAPI.ResponseFileSearchToolCall From 454832606ebe9d5cf8ffd436eac09375f682c495 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:48:11 +0000 Subject: [PATCH 162/246] fix(types): improve responses type names (#1392) --- .stats.yml | 2 +- api.md | 8 +- src/resources/responses/index.ts | 7 +- src/resources/responses/input-items.ts | 210 +------------------- src/resources/responses/input-items.ts.orig | 114 +++++++++++ src/resources/responses/responses.ts | 191 +++++++++++++++--- 6 files changed, 298 insertions(+), 234 deletions(-) create mode 100644 
src/resources/responses/input-items.ts.orig diff --git a/.stats.yml b/.stats.yml index 1e04d7c26..b03256223 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml diff --git a/api.md b/api.md index 2fac07f38..fd8482bf2 100644 --- a/api.md +++ b/api.md @@ -548,6 +548,8 @@ Types: - ResponseCodeInterpreterToolCall - ResponseCompletedEvent - ResponseComputerToolCall +- ResponseComputerToolCallOutputItem +- ResponseComputerToolCallOutputScreenshot - ResponseContent - ResponseContentPartAddedEvent - ResponseContentPartDoneEvent @@ -564,6 +566,8 @@ Types: - ResponseFunctionCallArgumentsDeltaEvent - ResponseFunctionCallArgumentsDoneEvent - ResponseFunctionToolCall +- ResponseFunctionToolCallItem +- ResponseFunctionToolCallOutputItem - ResponseFunctionWebSearch - ResponseInProgressEvent - ResponseIncludable @@ -575,7 +579,9 @@ Types: - ResponseInputImage - ResponseInputItem - ResponseInputMessageContentList +- ResponseInputMessageItem - ResponseInputText +- ResponseItem - ResponseOutputAudio - ResponseOutputItem - ResponseOutputItemAddedEvent @@ -616,4 +622,4 @@ Types: Methods: -- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts index 84f761a93..ad3f9a386 100644 --- a/src/resources/responses/index.ts +++ b/src/resources/responses/index.ts @@ -1,9 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { - ResponseItemListDataPage, - InputItems, - type ResponseItemList, - type InputItemListParams, -} from './input-items'; +export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items'; export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index d622b8e58..f2292e5c6 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -4,7 +4,8 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as ResponsesAPI from './responses'; -import { CursorPage, type CursorPageParams } from '../../pagination'; +import { ResponseItemsPage } from './responses'; +import { type CursorPageParams } from '../../pagination'; export class InputItems extends APIResource { /** @@ -14,68 +15,26 @@ export class InputItems extends APIResource { responseId: string, query?: InputItemListParams, options?: Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + ): Core.PagePromise; list( responseId: string, options?: Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + ): Core.PagePromise; list( responseId: string, query: InputItemListParams | Core.RequestOptions = {}, options?: 
Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - > { + ): Core.PagePromise { if (isRequestOptions(query)) { return this.list(responseId, {}, query); } - return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, { + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, { query, ...options, }); } } -export class ResponseItemListDataPage extends CursorPage< - // @ts-ignore some items don't necessarily have the `id` property - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput -> {} - /** * A list of Response items. */ @@ -83,16 +42,7 @@ export interface ResponseItemList { /** * A list of items used to generate this response. */ - data: Array< - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + data: Array; /** * The ID of the first item in the list. @@ -115,142 +65,6 @@ export interface ResponseItemList { object: 'list'; } -export namespace ResponseItemList { - export interface Message { - /** - * The unique ID of the message input. 
- */ - id: string; - - /** - * A list of one or many input items to the model, containing different content - * types. - */ - content: ResponsesAPI.ResponseInputMessageContentList; - - /** - * The role of the message input. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The status of item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - - /** - * The type of the message input. Always set to `message`. - */ - type?: 'message'; - } - - export interface ComputerCallOutput { - /** - * The unique ID of the computer call tool output. - */ - id: string; - - /** - * The ID of the computer tool call that produced the output. - */ - call_id: string; - - /** - * A computer screenshot image used with the computer use tool. - */ - output: ComputerCallOutput.Output; - - /** - * The type of the computer tool call output. Always `computer_call_output`. - */ - type: 'computer_call_output'; - - /** - * The safety checks reported by the API that have been acknowledged by the - * developer. - */ - acknowledged_safety_checks?: Array; - - /** - * The status of the message input. One of `in_progress`, `completed`, or - * `incomplete`. Populated when input items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - - /** - * A pending safety check for the computer call. 
- */ - export interface AcknowledgedSafetyCheck { - /** - * The ID of the pending safety check. - */ - id: string; - - /** - * The type of the pending safety check. - */ - code: string; - - /** - * Details about the pending safety check. - */ - message: string; - } - } - - export interface FunctionCallOutput { - /** - * The unique ID of the function call tool output. - */ - id: string; - - /** - * The unique ID of the function tool call generated by the model. - */ - call_id: string; - - /** - * A JSON string of the output of the function tool call. - */ - output: string; - - /** - * The type of the function tool call output. Always `function_call_output`. - */ - type: 'function_call_output'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } -} - export interface InputItemListParams extends CursorPageParams { /** * An item ID to list items before, used in pagination. @@ -266,12 +80,8 @@ export interface InputItemListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -InputItems.ResponseItemListDataPage = ResponseItemListDataPage; - export declare namespace InputItems { - export { - type ResponseItemList as ResponseItemList, - ResponseItemListDataPage as ResponseItemListDataPage, - type InputItemListParams as InputItemListParams, - }; + export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams }; } + +export { ResponseItemsPage }; diff --git a/src/resources/responses/input-items.ts.orig b/src/resources/responses/input-items.ts.orig new file mode 100644 index 000000000..470740b61 --- /dev/null +++ b/src/resources/responses/input-items.ts.orig @@ -0,0 +1,114 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { ResponseItemsPage } from './responses'; +import { type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, { + query, + ...options, + }); + } +} + +<<<<<<< HEAD +export class ResponseItemListDataPage extends CursorPage< + // @ts-ignore some items don't necessarily have the `id` property + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +||||||| parent of e5ea4a71 (fix(types): improve responses type names (#1392)) +export class ResponseItemListDataPage extends CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +======= +>>>>>>> e5ea4a71 (fix(types): improve responses type names (#1392)) +/** + * A list of 
Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace InputItems { + export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams }; +} + +export { ResponseItemsPage }; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 20d67b8ac..b2cd6b56c 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -9,12 +9,13 @@ import { import * as Core from '../../core'; import { APIPromise, isRequestOptions } from '../../core'; import { APIResource } from '../../resource'; -import { Stream } from '../../streaming'; import * as Shared from '../shared'; import * as InputItemsAPI from './input-items'; -import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList } from './input-items'; import * as ResponsesAPI from './responses'; import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; +import { CursorPage } from '../../pagination'; +import { Stream } from '../../streaming'; export interface ParsedResponseOutputText extends ResponseOutputText { 
parsed: ParsedT | null; @@ -137,6 +138,8 @@ export class Responses extends APIResource { } } +export class ResponseItemsPage extends CursorPage {} + /** * A tool that controls a virtual computer. Learn more about the * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). @@ -966,6 +969,83 @@ export namespace ResponseComputerToolCall { } } +export interface ResponseComputerToolCallOutputItem { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ResponseComputerToolCallOutputScreenshot; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseComputerToolCallOutputItem { + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * A computer screenshot image used with the computer use tool. + */ +export interface ResponseComputerToolCallOutputScreenshot { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. 
+ */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; +} + /** * Multi-modal input and output contents. */ @@ -1394,6 +1474,46 @@ export interface ResponseFunctionToolCall { status?: 'in_progress' | 'completed' | 'incomplete'; } +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + /** + * The unique ID of the function call tool output. + */ + id: string; +} + +export interface ResponseFunctionToolCallOutputItem { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + /** * The results of a web search tool call. See the * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for @@ -1607,7 +1727,7 @@ export namespace ResponseInputItem { /** * A computer screenshot image used with the computer use tool. */ - output: ComputerCallOutput.Output; + output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot; /** * The type of the computer tool call output. Always `computer_call_output`. @@ -1633,27 +1753,6 @@ export namespace ResponseInputItem { } export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. 
For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - /** * A pending safety check for the computer call. */ @@ -1729,6 +1828,35 @@ export namespace ResponseInputItem { */ export type ResponseInputMessageContentList = Array; +export interface ResponseInputMessageItem { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; +} + /** * A text input to the model. */ @@ -1744,6 +1872,19 @@ export interface ResponseInputText { type: 'input_text'; } +/** + * Content item used to generate a response. + */ +export type ResponseItem = + | ResponseInputMessageItem + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseComputerToolCallOutputItem + | ResponseFunctionWebSearch + | ResponseFunctionToolCallItem + | ResponseFunctionToolCallOutputItem; + /** * An audio output from the model. 
*/ @@ -2722,13 +2863,11 @@ export interface ResponseRetrieveParams { } Responses.InputItems = InputItems; -Responses.ResponseItemListDataPage = ResponseItemListDataPage; export declare namespace Responses { export { InputItems as InputItems, type ResponseItemList as ResponseItemList, - ResponseItemListDataPage as ResponseItemListDataPage, type InputItemListParams as InputItemListParams, }; } From d9277683745a854e52ac165a67840a09049e5077 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:48:40 +0000 Subject: [PATCH 163/246] release: 4.87.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0c7a85094..a3649b199 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.3" + ".": "4.87.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 46a595495..d820d8fcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.87.4 (2025-03-18) + +Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4) + +### Bug Fixes + +* **api:** correct some Responses types ([#1391](https://github.com/openai/openai-node/issues/1391)) ([af45876](https://github.com/openai/openai-node/commit/af458766ac721fb6cf18e7d78c458506c8bfc4e1)) +* **types:** ignore missing `id` in responses pagination ([1b9d20e](https://github.com/openai/openai-node/commit/1b9d20e71f5afbd4999f1999fe4810175476c5d2)) +* **types:** improve responses type names ([#1392](https://github.com/openai/openai-node/issues/1392)) ([164f476](https://github.com/openai/openai-node/commit/164f47606b41fd3e2850f8209eb1c6e2996a81ff)) + + +### Chores + +* add missing type alias exports 
([#1390](https://github.com/openai/openai-node/issues/1390)) ([16c5e22](https://github.com/openai/openai-node/commit/16c5e2261c8c1a0ba96c2d5f475e8b1bc67387d7)) +* **internal:** add back release workflow ([dddf29b](https://github.com/openai/openai-node/commit/dddf29bd914a02d4586b239ec06217389a4409f9)) +* **internal:** remove CI condition ([#1381](https://github.com/openai/openai-node/issues/1381)) ([ef17981](https://github.com/openai/openai-node/commit/ef17981a0bd6b3e971986ece829c5d260d7392d4)) +* **internal:** run CI on update-specs branch ([9fc2130](https://github.com/openai/openai-node/commit/9fc2130b74a5919a3bbd41926903bdb310de4446)) +* **internal:** update release workflows ([90b77d0](https://github.com/openai/openai-node/commit/90b77d09c04d21487aa38fe775c79ae632136813)) + ## 4.87.3 (2025-03-11) Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3) diff --git a/jsr.json b/jsr.json index 1051fade0..3e7c40d5f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.3", + "version": "4.87.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 9967a814d..baddade77 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.3", + "version": "4.87.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e84192528..172c899ea 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.3'; // x-release-please-version +export const VERSION = '4.87.4'; // x-release-please-version From 2e495267329b6853edff76c415e4c5ddc5e143e8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 20:50:59 +0000 Subject: [PATCH 164/246] chore(internal): version bump (#1393) --- 
jsr.json.orig | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 jsr.json.orig diff --git a/jsr.json.orig b/jsr.json.orig new file mode 100644 index 000000000..c7b99a6f6 --- /dev/null +++ b/jsr.json.orig @@ -0,0 +1,25 @@ +{ + "name": "@openai/openai", +<<<<<<< HEAD + "version": "4.87.4", + "exports": { + ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, + "imports": { + "zod": "npm:zod@3" + }, +||||||| parent of 0603bcac (chore(internal): version bump (#1393)) + "version": "4.87.3", + "exports": "./index.ts", +======= + "version": "4.87.4", + "exports": "./index.ts", +>>>>>>> 0603bcac (chore(internal): version bump (#1393)) + "publish": { + "exclude": [ + "!." + ] + } +} From 023d106185abf62f892bff66faf617eb45777004 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:18:34 +0000 Subject: [PATCH 165/246] chore(exports): cleaner resource index imports (#1396) --- src/resources.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/resources.ts diff --git a/src/resources.ts b/src/resources.ts new file mode 100644 index 000000000..b283d5781 --- /dev/null +++ b/src/resources.ts @@ -0,0 +1 @@ +export * from './resources/index'; From 7c3d212b47ee3090f5bbb82dd21026ba532da6e0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:28:57 +0000 Subject: [PATCH 166/246] chore(exports): stop using path fallbacks (#1397) --- package.json | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/package.json b/package.json index baddade77..ac540cf0e 100644 --- a/package.json +++ b/package.json @@ -112,38 +112,17 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "default": [ - "./dist/*.mjs", - 
"./dist/*/index.mjs" - ] + "types": "./dist/*.d.ts", + "default": "./dist/*.mjs" }, "./*.js": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "default": [ - "./dist/*.js", - "./dist/*/index.js" - ] + "types": "./dist/*.d.ts", + "default": "./dist/*.js" }, "./*": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "require": [ - "./dist/*.js", - "./dist/*/index.js" - ], - "default": [ - "./dist/*.mjs", - "./dist/*/index.mjs" - ] + "types": "./dist/*.d.ts", + "require": "./dist/*.js", + "default": "./dist/*.mjs" } }, "bin": "./bin/cli", From aefd2675154ff848032a7fec856f0db6ed2ad629 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 20:35:03 +0000 Subject: [PATCH 167/246] feat(api): o1-pro now available through the API (#1398) --- .stats.yml | 2 +- api.md | 2 ++ src/index.ts | 2 ++ src/resources/responses/responses.ts | 6 +++--- src/resources/shared.ts | 27 ++++++++++++++++++++++----- 5 files changed, 30 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index b03256223..e0b06dc22 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml diff --git a/api.md b/api.md index fd8482bf2..9b3aec141 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ Types: +- AllModels - ChatModel - ComparisonFilter - CompoundFilter @@ -14,6 +15,7 @@ Types: - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText +- ResponsesModel # Completions diff --git a/src/index.ts b/src/index.ts index 34cc3e84d..931894f2f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -508,6 +508,7 @@ export declare namespace OpenAI { 
export { Responses as Responses }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter = API.CompoundFilter; @@ -520,6 +521,7 @@ export declare namespace OpenAI { export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; + export type ResponsesModel = API.ResponsesModel; } // ---------------------- Azure ---------------------- diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index b2cd6b56c..b90d415bd 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -327,7 +327,7 @@ export interface Response { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * The object type of this resource - always set to `response`. @@ -1481,7 +1481,7 @@ export interface ResponseFunctionToolCall { */ export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { /** - * The unique ID of the function call tool output. + * The unique ID of the function tool call. */ id: string; } @@ -2679,7 +2679,7 @@ export interface ResponseCreateParamsBase { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * Specify additional output data to include in the model response. Currently diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 5fbdbba6a..2c0fb1c32 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export type AllModels = + | string + | ChatModel + | string + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; + export type ChatModel = | 'o3-mini' | 'o3-mini-2025-01-31' @@ -9,11 +19,6 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' - | 'computer-use-preview' - | 'computer-use-preview-2025-02-04' - | 'computer-use-preview-2025-03-11' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' @@ -23,6 +28,10 @@ export type ChatModel = | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'gpt-4o-search-preview' + | 'gpt-4o-mini-search-preview' + | 'gpt-4o-search-preview-2025-03-11' + | 'gpt-4o-mini-search-preview-2025-03-11' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -265,3 +274,11 @@ export interface ResponseFormatText { */ type: 'text'; } + +export type ResponsesModel = + | (string & {}) + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; From 20e97a4373711f0380f488477c3888e90d7134ac Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 21:04:38 +0000 Subject: [PATCH 168/246] release: 4.88.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a3649b199..424ace296 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.4" + ".": "4.88.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d820d8fcd..e2a73af85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.88.0 (2025-03-19) + +Full Changelog: 
[v4.87.4...v4.88.0](https://github.com/openai/openai-node/compare/v4.87.4...v4.88.0) + +### Features + +* **api:** o1-pro now available through the API ([#1398](https://github.com/openai/openai-node/issues/1398)) ([616a7e9](https://github.com/openai/openai-node/commit/616a7e90e764882cd749a65af8cc6ae8734fc80d)) + + +### Chores + +* **exports:** cleaner resource index imports ([#1396](https://github.com/openai/openai-node/issues/1396)) ([26b0856](https://github.com/openai/openai-node/commit/26b0856cd63846c34b75895a1ea42ceec7908c1a)) +* **exports:** stop using path fallbacks ([#1397](https://github.com/openai/openai-node/issues/1397)) ([d1479c2](https://github.com/openai/openai-node/commit/d1479c23aff68dd46c73fd31896dd2298a6bf140)) +* **internal:** version bump ([#1393](https://github.com/openai/openai-node/issues/1393)) ([7f16c3a](https://github.com/openai/openai-node/commit/7f16c3aa7b1ab36541219c5a0f93fc518733d0e3)) + ## 4.87.4 (2025-03-18) Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4) diff --git a/jsr.json b/jsr.json index 3e7c40d5f..ed87ee6d0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.4", + "version": "4.88.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index ac540cf0e..471fafc31 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.4", + "version": "4.88.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 172c899ea..c56dab45e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.4'; // x-release-please-version +export const VERSION = '4.88.0'; // x-release-please-version From d11b13cdf5412f03e551365297a27e610a36edda Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 16:13:23 +0000 Subject: [PATCH 169/246] feat(api): new models for TTS, STT, + new audio features for Realtime (#1407) --- .stats.yml | 4 +- api.md | 18 + src/resources/audio/audio.ts | 17 +- src/resources/audio/index.ts | 6 + src/resources/audio/speech.ts | 10 +- src/resources/audio/transcriptions.ts | 224 +++++++- src/resources/audio/translations.ts | 2 +- src/resources/beta/realtime/index.ts | 5 + src/resources/beta/realtime/realtime.ts | 522 ++++++++++++++++-- src/resources/beta/realtime/sessions.ts | 236 ++++++-- .../beta/realtime/transcription-sessions.ts | 308 +++++++++++ src/resources/chat/completions/completions.ts | 2 +- tests/api-resources/audio/speech.test.ts | 1 + .../audio/transcriptions.test.ts | 6 +- .../realtime/transcription-sessions.test.ts | 22 + 15 files changed, 1247 insertions(+), 136 deletions(-) create mode 100644 src/resources/beta/realtime/transcription-sessions.ts create mode 100644 tests/api-resources/beta/realtime/transcription-sessions.test.ts diff --git a/.stats.yml b/.stats.yml index e0b06dc22..abb937131 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml +configured_endpoints: 82 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml diff --git a/api.md b/api.md index 9b3aec141..cf464cf63 100644 --- a/api.md +++ b/api.md @@ -142,7 +142,11 @@ Types: Types: - Transcription +- TranscriptionInclude - TranscriptionSegment +- TranscriptionStreamEvent +- TranscriptionTextDeltaEvent +- TranscriptionTextDoneEvent - TranscriptionVerbose - TranscriptionWord - TranscriptionCreateResponse @@ -306,7 +310,9 @@ Types: - ConversationItemDeleteEvent - 
ConversationItemDeletedEvent - ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionDeltaEvent - ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemRetrieveEvent - ConversationItemTruncateEvent - ConversationItemTruncatedEvent - ConversationItemWithReference @@ -343,6 +349,8 @@ Types: - SessionCreatedEvent - SessionUpdateEvent - SessionUpdatedEvent +- TranscriptionSessionUpdate +- TranscriptionSessionUpdatedEvent ### Sessions @@ -355,6 +363,16 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse +### TranscriptionSessions + +Types: + +- TranscriptionSession + +Methods: + +- client.beta.realtime.transcriptionSessions.create({ ...params }) -> TranscriptionSession + ## Assistants Types: diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index b9a7ad4f8..071fe5929 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -7,8 +7,14 @@ import * as TranscriptionsAPI from './transcriptions'; import { Transcription, TranscriptionCreateParams, + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, TranscriptionCreateResponse, + TranscriptionInclude, TranscriptionSegment, + TranscriptionStreamEvent, + TranscriptionTextDeltaEvent, + TranscriptionTextDoneEvent, TranscriptionVerbose, TranscriptionWord, Transcriptions, @@ -28,11 +34,12 @@ export class Audio extends APIResource { speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client); } -export type AudioModel = 'whisper-1'; +export type AudioModel = 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe'; /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. 
*/ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; @@ -46,11 +53,17 @@ export declare namespace Audio { export { Transcriptions as Transcriptions, type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; export { diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 2bbe9e3ab..deed39ede 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -5,11 +5,17 @@ export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { Transcriptions, type Transcription, + type TranscriptionInclude, type TranscriptionSegment, + type TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent, type TranscriptionVerbose, type TranscriptionWord, type TranscriptionCreateResponse, type TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming, } from './transcriptions'; export { Translations, diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 35e82c4c1..4324028d5 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -18,7 +18,7 @@ export class Speech extends APIResource { } } -export type SpeechModel = 'tts-1' | 'tts-1-hd'; +export type SpeechModel = 'tts-1' | 'tts-1-hd' | 
'gpt-4o-mini-tts'; export interface SpeechCreateParams { /** @@ -28,7 +28,7 @@ export interface SpeechCreateParams { /** * One of the available [TTS models](https://platform.openai.com/docs/models#tts): - * `tts-1` or `tts-1-hd` + * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. */ model: (string & {}) | SpeechModel; @@ -40,6 +40,12 @@ export interface SpeechCreateParams { */ voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + /** + * Control the voice of your generated audio with additional instructions. Does not + * work with `tts-1` or `tts-1-hd`. + */ + instructions?: string; + /** * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, * `wav`, and `pcm`. diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 6fbe96b58..7f797c709 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -2,29 +2,42 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; +import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; +import { Stream } from '../../streaming'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. 
*/ create( - body: TranscriptionCreateParams<'json' | undefined>, + body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'verbose_json'>, + body: TranscriptionCreateParamsNonStreaming<'verbose_json'>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, + body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>, options?: Core.RequestOptions, ): Core.APIPromise; - create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise; + create( + body: TranscriptionCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; create( body: TranscriptionCreateParams, options?: Core.RequestOptions, - ): Core.APIPromise { + ): Core.APIPromise> { return this._client.post( '/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), @@ -41,8 +54,36 @@ export interface Transcription { * The transcribed text. */ text: string; + + /** + * The log probabilities of the tokens in the transcription. Only returned with the + * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + * to the `include` array. + */ + logprobs?: Array; } +export namespace Transcription { + export interface Logprob { + /** + * The token in the transcription. + */ + token?: string; + + /** + * The bytes of the token. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +export type TranscriptionInclude = 'logprobs'; + export interface TranscriptionSegment { /** * Unique identifier of the segment. 
@@ -98,6 +139,103 @@ export interface TranscriptionSegment { tokens: Array; } +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export type TranscriptionStreamEvent = TranscriptionTextDeltaEvent | TranscriptionTextDoneEvent; + +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDeltaEvent { + /** + * The text delta that was additionally transcribed. + */ + delta: string; + + /** + * The type of the event. Always `transcript.text.delta`. + */ + type: 'transcript.text.delta'; + + /** + * The log probabilities of the delta. Only included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDeltaEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +/** + * Emitted when the transcription is complete. Contains the complete transcription + * text. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDoneEvent { + /** + * The text that was transcribed. 
+ */ + text: string; + + /** + * The type of the event. Always `transcript.text.done`. + */ + type: 'transcript.text.done'; + + /** + * The log probabilities of the individual tokens in the transcription. Only + * included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDoneEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + /** * Represents a verbose json transcription response returned by model, based on the * provided input. @@ -152,7 +290,11 @@ export interface TranscriptionWord { */ export type TranscriptionCreateResponse = Transcription | TranscriptionVerbose; -export interface TranscriptionCreateParams< +export type TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> = TranscriptionCreateParamsNonStreaming | TranscriptionCreateParamsStreaming; + +export interface TranscriptionCreateParamsBase< ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, > { /** @@ -162,11 +304,21 @@ export interface TranscriptionCreateParams< file: Core.Uploadable; /** - * ID of the model to use. Only `whisper-1` (which is powered by our open source - * Whisper V2 model) is currently available. + * ID of the model to use. The options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + * Whisper V2 model). */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Additional information to include in the transcription response. 
`logprobs` will + * return the log probabilities of the tokens in the response to understand the + * model's confidence in the transcription. `logprobs` only works with + * response_format set to `json` and only with the models `gpt-4o-transcribe` and + * `gpt-4o-mini-transcribe`. + */ + include?: Array; + /** * The language of the input audio. Supplying the input language in * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -184,10 +336,23 @@ export interface TranscriptionCreateParams< /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. */ response_format?: ResponseFormat; + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: boolean | null; + /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the * output more random, while lower values like 0.2 will make it more focused and @@ -207,13 +372,54 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } +export namespace TranscriptionCreateParams { + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; + export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; +} + +export interface TranscriptionCreateParamsNonStreaming< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: false | null; +} + +export interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. 
+ */ + stream: true; +} + export declare namespace Transcriptions { export { type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index dac519ede..df312f876 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -88,7 +88,7 @@ export interface TranslationCreateParams< * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: ResponseFormat; + response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts index 66c3ecaae..ba51d8a66 100644 --- a/src/resources/beta/realtime/index.ts +++ b/src/resources/beta/realtime/index.ts @@ -2,3 +2,8 @@ export { Realtime } from './realtime'; export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; +export { + TranscriptionSessions, + type TranscriptionSession, + type TranscriptionSessionCreateParams, +} from './transcription-sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5e2b1c833..d0a74840b 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -10,9 +10,17 @@ import { SessionCreateResponse, Sessions, } from './sessions'; +import * as TranscriptionSessionsAPI from './transcription-sessions'; +import { + TranscriptionSession, + TranscriptionSessionCreateParams, + TranscriptionSessions, +} from './transcription-sessions'; export class Realtime extends APIResource { sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); + transcriptionSessions: TranscriptionSessionsAPI.TranscriptionSessions = + new TranscriptionSessionsAPI.TranscriptionSessions(this._client); } /** @@ -300,6 +308,91 @@ export interface ConversationItemInputAudioTranscriptionCompletedEvent { * The event type, must be `conversation.item.input_audio_transcription.completed`. */ type: 'conversation.item.input_audio_transcription.completed'; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. 
+ */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } +} + +/** + * Returned when the text value of an input audio transcription content part is + * updated. + */ +export interface ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.delta`. + */ + type: 'conversation.item.input_audio_transcription.delta'; + + /** + * The index of the content part in the item's content array. + */ + content_index?: number; + + /** + * The text delta. + */ + delta?: string; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } } /** @@ -361,6 +454,30 @@ export namespace ConversationItemInputAudioTranscriptionFailedEvent { } } +/** + * Send this event when you want to retrieve the server's representation of a + * specific item in the conversation history. This is useful, for example, to + * inspect user audio after noise cancellation and VAD. The server will respond + * with a `conversation.item.retrieved` event, unless the item does not exist in + * the conversation history, in which case the server will respond with an error. + */ +export interface ConversationItemRetrieveEvent { + /** + * The ID of the item to retrieve. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.retrieve`. 
+ */ + type: 'conversation.item.retrieve'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + /** * Send this event to truncate a previous assistant message’s audio. The server * will produce audio faster than realtime, so this event is useful when the user @@ -789,18 +906,20 @@ export namespace RateLimitsUpdatedEvent { } /** - * All events that the client can send to the Realtime API + * A realtime client event. */ export type RealtimeClientEvent = - | SessionUpdateEvent - | InputAudioBufferAppendEvent - | InputAudioBufferCommitEvent - | InputAudioBufferClearEvent | ConversationItemCreateEvent - | ConversationItemTruncateEvent | ConversationItemDeleteEvent + | ConversationItemRetrieveEvent + | ConversationItemTruncateEvent + | InputAudioBufferAppendEvent + | InputAudioBufferClearEvent + | InputAudioBufferCommitEvent + | ResponseCancelEvent | ResponseCreateEvent - | ResponseCancelEvent; + | SessionUpdateEvent + | TranscriptionSessionUpdate; /** * The response resource. @@ -1009,37 +1128,63 @@ export namespace RealtimeResponseUsage { } /** - * All events that the Realtime API can send back + * A realtime server event. 
*/ export type RealtimeServerEvent = - | ErrorEvent - | SessionCreatedEvent - | SessionUpdatedEvent | ConversationCreatedEvent - | InputAudioBufferCommittedEvent - | InputAudioBufferClearedEvent - | InputAudioBufferSpeechStartedEvent - | InputAudioBufferSpeechStoppedEvent | ConversationItemCreatedEvent + | ConversationItemDeletedEvent | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionDeltaEvent | ConversationItemInputAudioTranscriptionFailedEvent + | RealtimeServerEvent.ConversationItemRetrieved | ConversationItemTruncatedEvent - | ConversationItemDeletedEvent + | ErrorEvent + | InputAudioBufferClearedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | RateLimitsUpdatedEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent | ResponseCreatedEvent | ResponseDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent - | ResponseContentPartAddedEvent - | ResponseContentPartDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent - | ResponseAudioTranscriptDeltaEvent - | ResponseAudioTranscriptDoneEvent - | ResponseAudioDeltaEvent - | ResponseAudioDoneEvent - | ResponseFunctionCallArgumentsDeltaEvent - | ResponseFunctionCallArgumentsDoneEvent - | RateLimitsUpdatedEvent; + | SessionCreatedEvent + | SessionUpdatedEvent + | TranscriptionSessionUpdatedEvent; + +export namespace RealtimeServerEvent { + /** + * Returned when a conversation item is retrieved with + * `conversation.item.retrieve`. + */ + export interface ConversationItemRetrieved { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. 
+ */ + item: RealtimeAPI.ConversationItem; + + /** + * The event type, must be `conversation.item.retrieved`. + */ + type: 'conversation.item.retrieved'; + } +} /** * Returned when the model-generated audio is updated. @@ -1834,15 +1979,24 @@ export namespace SessionUpdateEvent { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -1891,7 +2045,8 @@ export namespace SessionUpdateEvent { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. 
Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -1907,9 +2062,16 @@ export namespace SessionUpdateEvent { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ turn_detection?: Session.TurnDetection; @@ -1922,15 +2084,31 @@ export namespace SessionUpdateEvent { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. 
`near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** @@ -1941,16 +2119,17 @@ export namespace SessionUpdateEvent { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). 
+ * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -1979,48 +2158,62 @@ export namespace SessionUpdateEvent { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. 
*/ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } } @@ -2046,7 +2239,216 @@ export interface SessionUpdatedEvent { type: 'session.updated'; } +/** + * Send this event to update a transcription session. + */ +export interface TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + session: TranscriptionSessionUpdate.Session; + + /** + * The event type, must be `transcription_session.update`. + */ + type: 'transcription_session.update'; + + /** + * Optional client-generated ID used to identify this event. 
+ */ + event_id?: string; +} + +export namespace TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + export interface Session { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. 
Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: Session.TurnDetection; + } + + export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. 
+ */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. 
Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } + } +} + +/** + * Returned when a transcription session is updated with a + * `transcription_session.update` event, unless there is an error. + */ +export interface TranscriptionSessionUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ + session: TranscriptionSessionsAPI.TranscriptionSession; + + /** + * The event type, must be `transcription_session.updated`. 
+ */ + type: 'transcription_session.updated'; +} + Realtime.Sessions = Sessions; +Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { export { @@ -2055,4 +2457,10 @@ export declare namespace Realtime { type SessionCreateResponse as SessionCreateResponse, type SessionCreateParams as SessionCreateParams, }; + + export { + TranscriptionSessions as TranscriptionSessions, + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; } diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index a99c9e045..bae50124e 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -27,7 +27,7 @@ export class Sessions extends APIResource { */ export interface Session { /** - * Unique identifier for the session object. + * Unique identifier for the session that looks like `sess_1234567890abcdef`. */ id?: string; @@ -38,12 +38,24 @@ export interface Session { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. 
+ * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -79,7 +91,6 @@ export interface Session { * The Realtime model used for this session. */ model?: - | (string & {}) | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' @@ -93,7 +104,8 @@ export interface Session { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -109,11 +121,18 @@ export interface Session { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. 
For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ - turn_detection?: Session.TurnDetection | null; + turn_detection?: Session.TurnDetection; /** * The voice the model uses to respond. Voice cannot be changed during the session @@ -124,19 +143,54 @@ export interface Session { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. 
*/ export interface InputAudioTranscription { /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; } export interface Tool { @@ -163,48 +217,62 @@ export namespace Session { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. 
*/ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. 
+ * Type of turn detection. */ - type?: 'server_vad'; + type?: 'server_vad' | 'semantic_vad'; } } @@ -394,15 +462,24 @@ export interface SessionCreateParams { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: SessionCreateParams.InputAudioTranscription; @@ -451,7 +528,8 @@ export interface SessionCreateParams { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. 
For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -467,9 +545,16 @@ export interface SessionCreateParams { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ turn_detection?: SessionCreateParams.TurnDetection; @@ -482,15 +567,31 @@ export interface SessionCreateParams { } export namespace SessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. 
+ */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** @@ -501,16 +602,17 @@ export namespace SessionCreateParams { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -539,48 +641,62 @@ export namespace SessionCreateParams { } /** - * Configuration for turn detection. 
Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. 
*/ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts new file mode 100644 index 000000000..d749f8502 --- /dev/null +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -0,0 +1,308 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class TranscriptionSessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API specifically for realtime transcriptions. Can be configured with + * the same session parameters as the `transcription_session.update` client event. 
+ * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + */ + create( + body: TranscriptionSessionCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/realtime/transcription_sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +} + +/** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ +export interface TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + client_secret: TranscriptionSession.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration of the transcription model. + */ + input_audio_transcription?: TranscriptionSession.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: TranscriptionSession.TurnDetection; +} + +export namespace TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. 
+ */ + expires_at: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value: string; + } + + /** + * Configuration of the transcription model. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription. Can be `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, or `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. + */ + prompt?: string; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface TranscriptionSessionCreateParams { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. 
Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: TranscriptionSessionCreateParams.TurnDetection; +} + +export namespace TranscriptionSessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. 
+ */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. 
Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } +} + +export declare namespace TranscriptionSessions { + export { + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; +} diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index f54c01597..08bf7f8db 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -383,7 +383,7 @@ export interface ChatCompletionChunk { * **NOTE:** If the stream is interrupted or cancelled, you may not receive the * final usage chunk which contains the total token usage for the request. 
*/ - usage?: CompletionsAPI.CompletionUsage; + usage?: CompletionsAPI.CompletionUsage | null; } export namespace ChatCompletionChunk { diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 904d75e5d..cbec6cfac 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -14,6 +14,7 @@ describe('resource speech', () => { input: 'input', model: 'string', voice: 'alloy', + instructions: 'instructions', response_format: 'mp3', speed: 0.25, }); diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 86ef5e576..2297677b4 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -12,7 +12,7 @@ describe('resource transcriptions', () => { test('create: only required params', async () => { const responsePromise = client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -26,10 +26,12 @@ describe('resource transcriptions', () => { test('create: required and optional params', async () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', + include: ['logprobs'], language: 'language', prompt: 'prompt', response_format: 'json', + stream: false, temperature: 0, timestamp_granularities: ['word'], }); diff --git a/tests/api-resources/beta/realtime/transcription-sessions.test.ts b/tests/api-resources/beta/realtime/transcription-sessions.test.ts new file mode 100644 index 000000000..d52ce2403 --- /dev/null +++ b/tests/api-resources/beta/realtime/transcription-sessions.test.ts @@ -0,0 +1,22 @@ +// File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource transcriptionSessions', () => { + test('create', async () => { + const responsePromise = client.beta.realtime.transcriptionSessions.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); From ec5067deba1fe4202d90db42e45c3bd774936af1 Mon Sep 17 00:00:00 2001 From: Kevin Whinnery Date: Thu, 20 Mar 2025 11:34:48 -0500 Subject: [PATCH 170/246] feat: add audio helpers --- examples/speech-to-text.ts | 19 +++++ examples/text-to-speech.ts | 23 ++++++ src/helpers/audio.ts | 145 +++++++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 examples/speech-to-text.ts create mode 100644 examples/text-to-speech.ts create mode 100644 src/helpers/audio.ts diff --git a/examples/speech-to-text.ts b/examples/speech-to-text.ts new file mode 100644 index 000000000..f2eb60b4d --- /dev/null +++ b/examples/speech-to-text.ts @@ -0,0 +1,19 @@ +import OpenAI from 'openai'; +import { recordAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +async function main(): Promise { + console.log('Recording for 5 seconds...'); + const response = await recordAudio({ timeout: 5000, device: 4 }); + + console.log('Transcribing...'); + const transcription = await openai.audio.transcriptions.create({ + file: response, + model: 'whisper-1', + }); + + console.log(transcription.text); +} + +main().catch(console.error); diff --git a/examples/text-to-speech.ts 
b/examples/text-to-speech.ts new file mode 100644 index 000000000..5a87adf91 --- /dev/null +++ b/examples/text-to-speech.ts @@ -0,0 +1,23 @@ +import OpenAI from 'openai'; +import { playAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +const exampleText = ` +I see skies of blue and clouds of white +The bright blessed days, the dark sacred nights +And I think to myself +What a wonderful world +`.trim(); + +async function main(): Promise { + const response = await openai.audio.speech.create({ + model: 'tts-1', + voice: 'nova', + input: exampleText, + }); + + await playAudio(response); +} + +main().catch(console.error); diff --git a/src/helpers/audio.ts b/src/helpers/audio.ts new file mode 100644 index 000000000..f1a6ea371 --- /dev/null +++ b/src/helpers/audio.ts @@ -0,0 +1,145 @@ +import { File } from 'formdata-node'; +import { spawn } from 'node:child_process'; +import { Readable } from 'node:stream'; +import { platform, versions } from 'node:process'; +import { Response } from 'openai/_shims'; + +const DEFAULT_SAMPLE_RATE = 24000; +const DEFAULT_CHANNELS = 1; + +const isNode = Boolean(versions?.node); + +const recordingProviders: Record = { + win32: 'dshow', + darwin: 'avfoundation', + linux: 'alsa', + aix: 'alsa', + android: 'alsa', + freebsd: 'alsa', + haiku: 'alsa', + sunos: 'alsa', + netbsd: 'alsa', + openbsd: 'alsa', + cygwin: 'dshow', +}; + +function isResponse(stream: NodeJS.ReadableStream | Response | File): stream is Response { + return typeof (stream as any).body !== 'undefined'; +} + +function isFile(stream: NodeJS.ReadableStream | Response | File): stream is File { + return stream instanceof File; +} + +async function nodejsPlayAudio(stream: NodeJS.ReadableStream | Response | File): Promise { + return new Promise((resolve, reject) => { + try { + const ffplay = spawn('ffplay', ['-autoexit', '-nodisp', '-i', 'pipe:0']); + + if (isResponse(stream)) { + stream.body.pipe(ffplay.stdin); + } else if (isFile(stream)) { + 
Readable.from(stream.stream()).pipe(ffplay.stdin); + } else { + stream.pipe(ffplay.stdin); + } + + ffplay.on('close', (code: number) => { + if (code !== 0) { + reject(new Error(`ffplay process exited with code ${code}`)); + } + resolve(); + }); + } catch (error) { + reject(error); + } + }); +} + +export async function playAudio(input: NodeJS.ReadableStream | Response | File): Promise { + if (isNode) { + return nodejsPlayAudio(input); + } + + throw new Error( + 'Play audio is not supported in the browser yet. Check out https://npm.im/wavtools as an alternative.', + ); +} + +type RecordAudioOptions = { + signal?: AbortSignal; + device?: number; + timeout?: number; +}; + +function nodejsRecordAudio({ signal, device, timeout }: RecordAudioOptions = {}): Promise { + return new Promise((resolve, reject) => { + const data: any[] = []; + const provider = recordingProviders[platform]; + try { + const ffmpeg = spawn( + 'ffmpeg', + [ + '-f', + provider, + '-i', + `:${device ?? 0}`, // default audio input device; adjust as needed + '-ar', + DEFAULT_SAMPLE_RATE.toString(), + '-ac', + DEFAULT_CHANNELS.toString(), + '-f', + 'wav', + 'pipe:1', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + }, + ); + + ffmpeg.stdout.on('data', (chunk) => { + data.push(chunk); + }); + + ffmpeg.on('error', (error) => { + console.error(error); + reject(error); + }); + + ffmpeg.on('close', (code) => { + returnData(); + }); + + function returnData() { + const audioBuffer = Buffer.concat(data); + const audioFile = new File([audioBuffer], 'audio.wav', { type: 'audio/wav' }); + resolve(audioFile); + } + + if (typeof timeout === 'number' && timeout > 0) { + const internalSignal = AbortSignal.timeout(timeout); + internalSignal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + + if (signal) { + signal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + } catch (error) { + reject(error); + } + }); +} + +export async function recordAudio(options: RecordAudioOptions = 
{}) { + if (isNode) { + return nodejsRecordAudio(options); + } + + throw new Error( + 'Record audio is not supported in the browser. Check out https://npm.im/wavtools as an alternative.', + ); +} From 4b0d0392cc030e33d3889dfd42382c66df5910da Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 16:35:21 +0000 Subject: [PATCH 171/246] release: 4.89.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 424ace296..c77dd18b0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.88.0" + ".": "4.89.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2a73af85..4597d6e56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.89.0 (2025-03-20) + +Full Changelog: [v4.88.0...v4.89.0](https://github.com/openai/openai-node/compare/v4.88.0...v4.89.0) + +### Features + +* add audio helpers ([ea1b6b4](https://github.com/openai/openai-node/commit/ea1b6b4ef38813af568b3662037519da9404b80e)) +* **api:** new models for TTS, STT, + new audio features for Realtime ([#1407](https://github.com/openai/openai-node/issues/1407)) ([142933a](https://github.com/openai/openai-node/commit/142933ae70d06045dbf4661cd72c7fa35ae7903d)) + + +### Chores + +* **internal:** version bump ([#1400](https://github.com/openai/openai-node/issues/1400)) ([6838ab4](https://github.com/openai/openai-node/commit/6838ab4268c7c0e083e7be21ef1a51bdea0f0b57)) + ## 4.88.0 (2025-03-19) Full Changelog: [v4.87.4...v4.88.0](https://github.com/openai/openai-node/compare/v4.87.4...v4.88.0) diff --git a/jsr.json b/jsr.json index ed87ee6d0..3e7fdb744 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.88.0", + "version": 
"4.89.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 471fafc31..a77975fda 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.88.0", + "version": "4.89.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c56dab45e..dab92ced6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.88.0'; // x-release-please-version +export const VERSION = '4.89.0'; // x-release-please-version From 1ed4288c7b9ca8fcb00e524bc6f39c255c6661c5 Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Mon, 24 Mar 2025 16:13:16 -0700 Subject: [PATCH 172/246] chore: update next to 14.2.25 for CVE-2025-29927 --- ecosystem-tests/vercel-edge/package-lock.json | 119 +- ecosystem-tests/vercel-edge/package.json | 2 +- examples/package-lock.json | 2007 +++++++++++++++++ examples/package.json | 2 +- 4 files changed, 2080 insertions(+), 50 deletions(-) create mode 100644 examples/package-lock.json diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index bc820a010..541213a8d 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -1180,17 +1180,19 @@ } }, "node_modules/@next/env": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", - "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": 
"sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", - "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1200,12 +1202,13 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", - "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1215,12 +1218,13 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", - "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ 
-1230,12 +1234,13 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", - "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1245,12 +1250,13 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", - "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1260,12 +1266,13 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", - "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1275,12 +1282,13 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - 
"version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", - "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1290,12 +1298,13 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", - "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", "cpu": [ "ia32" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1305,12 +1314,13 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", - "integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1418,11 +1428,19 @@ "@sinonjs/commons": "^3.0.0" } }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": 
"https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, "node_modules/@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", "dependencies": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, @@ -5061,12 +5079,13 @@ "dev": true }, "node_modules/next": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", - "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", "dependencies": { - "@next/env": "14.1.1", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "graceful-fs": "^4.2.11", @@ -5080,18 +5099,19 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.1", - "@next/swc-darwin-x64": "14.1.1", - "@next/swc-linux-arm64-gnu": "14.1.1", - "@next/swc-linux-arm64-musl": "14.1.1", - "@next/swc-linux-x64-gnu": "14.1.1", - "@next/swc-linux-x64-musl": "14.1.1", - "@next/swc-win32-arm64-msvc": "14.1.1", - "@next/swc-win32-ia32-msvc": "14.1.1", - "@next/swc-win32-x64-msvc": "14.1.1" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": 
"14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5100,6 +5120,9 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true + }, "sass": { "optional": true } diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 4c75dd4fd..5a8fea816 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, diff --git a/examples/package-lock.json b/examples/package-lock.json new file mode 100644 index 000000000..6feb8c5f4 --- /dev/null +++ b/examples/package-lock.json @@ -0,0 +1,2007 @@ +{ + "name": "openai-examples", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "openai-examples", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "@azure/identity": "^4.2.0", + "dotenv": "^16.4.7", + "express": "^4.18.2", + "next": "^14.2.25", + "openai": "file:..", + "zod-to-json-schema": "^3.21.4" + }, + "devDependencies": { + "@types/body-parser": "^1.19.3", + "@types/express": "^4.17.19", + "@types/web": "^0.0.194" + } + }, + "..": { + "name": "openai", + "version": "4.89.0", + "license": "Apache-2.0", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + }, + "bin": { + "openai": "bin/cli" + }, 
+ "devDependencies": { + "@swc/core": "^1.3.102", + "@swc/jest": "^0.2.29", + "@types/jest": "^29.4.0", + "@types/ws": "^8.5.13", + "@typescript-eslint/eslint-plugin": "^6.7.0", + "@typescript-eslint/parser": "^6.7.0", + "eslint": "^8.49.0", + "eslint-plugin-prettier": "^5.0.1", + "eslint-plugin-unused-imports": "^3.0.0", + "fast-check": "^3.22.0", + "iconv-lite": "^0.6.3", + "jest": "^29.4.0", + "prettier": "^3.0.0", + "prettier-2": "npm:prettier@^2", + "ts-jest": "^29.1.0", + "ts-node": "^10.5.0", + "tsc-multi": "^1.1.0", + "tsconfig-paths": "^4.0.0", + "typescript": "^4.8.2", + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependencies": { + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + }, + "zod": { + "optional": true + } + } + }, + "node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-auth": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@azure/core-auth/-/core-auth-1.9.0.tgz", + "integrity": "sha512-FPwHpZywuyasDSLMqJ6fhbOK3TqUdviZNF8OqRGA4W5Ewib2lEEZ+pBsYcBa88B2NGO/SEnYPGhyBqNlE8ilSw==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-util": "^1.11.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-client": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@azure/core-client/-/core-client-1.9.3.tgz", + "integrity": "sha512-/wGw8fJ4mdpJ1Cum7s1S+VQyXt1ihwKLzfabS1O/RDADnmzVc01dHn44qD0BvGH6KlZNzOMW95tEpKqhkCChPA==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + 
"@azure/core-auth": "^1.4.0", + "@azure/core-rest-pipeline": "^1.9.1", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.6.1", + "@azure/logger": "^1.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/@azure/core-rest-pipeline/-/core-rest-pipeline-1.19.1.tgz", + "integrity": "sha512-zHeoI3NCs53lLBbWNzQycjnYKsA1CVKlnzSNuSFcUDwBp8HHVObePxrM7HaX+Ha5Ks639H7chNC9HOaIhNS03w==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.8.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-tracing": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@azure/core-tracing/-/core-tracing-1.2.0.tgz", + "integrity": "sha512-UKTiEJPkWcESPYJz3X5uKRYyOcJD+4nYph+KpfdPRnQJVrZfk0KJgdnaAWKfhsBBtAf/D58Az4AvCJEmWgIBAg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-util": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@azure/core-util/-/core-util-1.11.0.tgz", + "integrity": "sha512-DxOSLua+NdpWoSqULhjDyAZTXFdP/LKkqtYuxxz1SCN289zk3OG8UOpnCQAz/tygyACBtWp/BoO72ptK7msY8g==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/identity": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/identity/-/identity-4.8.0.tgz", + "integrity": "sha512-l9ALUGHtFB/JfsqmA+9iYAp2a+cCwdNO/cyIr2y7nJLJsz1aae6qVP8XxT7Kbudg0IQRSIMXj0+iivFdbD1xPA==", + "license": "MIT", + "dependencies": { + 
"@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.9.0", + "@azure/core-client": "^1.9.2", + "@azure/core-rest-pipeline": "^1.17.0", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "@azure/msal-browser": "^4.2.0", + "@azure/msal-node": "^3.2.3", + "events": "^3.0.0", + "jws": "^4.0.0", + "open": "^10.1.0", + "stoppable": "^1.1.0", + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@azure/logger/-/logger-1.1.4.tgz", + "integrity": "sha512-4IXXzcCdLdlXuCG+8UKEwLA1T1NHqUfanhXYHiQTn+6sfWCZXduqbtXDGceg3Ce5QxTGo7EqmbV6Bi+aqKuClQ==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/msal-browser": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.8.0.tgz", + "integrity": "sha512-z7kJlMW3IAETyq82LDKJqr++IeOvU728q9lkuTFjEIPUWxnB1OlmuPCF32fYurxOnOnJeFEZxjbEzq8xyP0aag==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-common": { + "version": "15.3.0", + "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.3.0.tgz", + "integrity": "sha512-lh+eZfibGwtQxFnx+mj6cYWn0pwA8tDnn8CBs9P21nC7Uw5YWRwfXaXdVQSMENZ5ojRqR+NzRaucEo4qUvs3pA==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-node": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@azure/msal-node/-/msal-node-3.4.0.tgz", + "integrity": "sha512-b4wBaPV68i+g61wFOfl5zh1lQ9UylgCQpI2638pJHV0SINneO78hOFdnX8WCoGw5OOc4Eewth9pYOg7gaiyUYw==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0", + "jsonwebtoken": "^9.0.0", + "uuid": "^8.3.0" + }, + "engines": { + "node": ">=16" + } + }, + 
"node_modules/@next/env": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/@next/swc-linux-x64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": 
"sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": 
"^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.6", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", + "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.13.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.13.tgz", + "integrity": "sha512-ClsL5nMwKaBRwPcCvH8E7+nU4GxHVx1axNvMZTFHMEfNI7oahimt26P5zjVCRrjiIWj6YFXfE1v3dEp94wLcGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.20.0" + } + }, + "node_modules/@types/qs": { + "version": "6.9.18", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.18.tgz", + "integrity": "sha512-kK7dgTYDyGqS+e2Q4aK9X3D7q234CIZ1Bv0q/7Z5IwRDoADNU81xXJK/YVyLbLTZCoIwUoDoffFeF+p/eIklAA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": 
"sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/web": { + "version": "0.0.194", + "resolved": "https://registry.npmjs.org/@types/web/-/web-0.0.194.tgz", + "integrity": "sha512-VKseTFF3Y8SNbpZqdVFNWQ677ujwNyrI9LcySEUwZX5iebbcdE235Lq/vqrfCzj1oFsXyVUUBqq4x8enXSakMA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, 
+ "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001707", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001707.tgz", + "integrity": "sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": 
"sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/default-browser": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", + "integrity": 
"sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==", + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz", + "integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": 
"sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com/" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": 
"1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + 
"cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": 
"sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": 
"sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/https-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": 
{ + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + 
"node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT", + "peer": true + }, + "node_modules/jsonwebtoken": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz", + "integrity": "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==", + "license": "MIT", + "dependencies": { + "jws": "^3.2.2", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsonwebtoken/node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "license": "MIT", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/jwa": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", + "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", + "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", + "integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==", + "license": "MIT" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==", + "license": "MIT" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", + "integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==", + "license": "MIT" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", + "integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==", + 
"license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "license": "MIT" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==", + "license": "MIT" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "peer": true, + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + 
"version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": 
"https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/next": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", + "dependencies": { + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", + "postcss": "8.4.31", + "styled-jsx": "5.1.1" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=18.17.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "sass": { + 
"optional": true + } + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/open": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz", + "integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==", + "license": "MIT", + "dependencies": { + "default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openai": { + "resolved": "..", + "link": true + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + 
"node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": 
"sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/run-applescript": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", + "integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + 
"node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/stoppable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stoppable/-/stoppable-1.1.0.tgz", + "integrity": "sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==", + "license": "MIT", + "engines": { + "node": ">=4", + "npm": ">=6" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": 
"sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/undici-types": { + "version": "6.20.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "dev": true, + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + 
"engines": { + "node": ">= 0.8" + } + }, + "node_modules/zod": { + "version": "3.24.2", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz", + "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.24.5", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz", + "integrity": "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.24.1" + } + } + } +} diff --git a/examples/package.json b/examples/package.json index 70ec2c523..db01a2c10 100644 --- a/examples/package.json +++ b/examples/package.json @@ -9,7 +9,7 @@ "@azure/identity": "^4.2.0", "dotenv": "^16.4.7", "express": "^4.18.2", - "next": "^14.1.1", + "next": "^14.2.25", "openai": "file:..", "zod-to-json-schema": "^3.21.4" }, From 22a4d95f8be418827b4b13280c2d7f976bd1ad42 Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Mon, 24 Mar 2025 16:18:28 -0700 Subject: [PATCH 173/246] Update next to patch CVE-2025-29927 --- ecosystem-tests/vercel-edge/package-lock.json | 624 +++++++++++++++--- ecosystem-tests/vercel-edge/package.json | 2 +- 2 files changed, 542 insertions(+), 84 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 541213a8d..770dc460a 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "^14.2.25", + "next": "^15.2.3", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -777,6 +777,16 @@ "node": ">=16" } }, + "node_modules/@emnapi/runtime": { + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz", + "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -792,6 +802,367 @@ "@hapi/hoek": "^9.0.0" } }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", + "integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz", + "integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz", + "integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==", + "cpu": [ + "arm64" 
+ ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz", + "integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz", + "integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz", + "integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz", + "integrity": "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", 
+ "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz", + "integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz", + "integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz", + "integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz", + "integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==", + "cpu": [ + "arm" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + 
"engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.0.5" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz", + "integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz", + "integrity": "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.0.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz", + "integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + 
"@img/sharp-libvips-linux-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz", + "integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz", + "integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz", + "integrity": "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.2.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.33.5", + "resolved": 
"https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz", + "integrity": "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", + "integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", @@ -1180,15 +1551,15 @@ } }, "node_modules/@next/env": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", - "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.3.tgz", + "integrity": "sha512-a26KnbW9DFEUsSxAxKBORR/uD9THoYoKbkpFywMN/AFvboTt94b8+g/07T8J6ACsdLag8/PDU60ov4rPxRAixw==", "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", - "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", + "version": 
"15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.3.tgz", + "integrity": "sha512-uaBhA8aLbXLqwjnsHSkxs353WrRgQgiFjduDpc7YXEU0B54IKx3vU+cxQlYwPCyC8uYEEX7THhtQQsfHnvv8dw==", "cpu": [ "arm64" ], @@ -1202,9 +1573,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", - "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.3.tgz", + "integrity": "sha512-pVwKvJ4Zk7h+4hwhqOUuMx7Ib02u3gDX3HXPKIShBi9JlYllI0nU6TWLbPT94dt7FSi6mSBhfc2JrHViwqbOdw==", "cpu": [ "x64" ], @@ -1218,9 +1589,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", - "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.3.tgz", + "integrity": "sha512-50ibWdn2RuFFkOEUmo9NCcQbbV9ViQOrUfG48zHBCONciHjaUKtHcYFiCwBVuzD08fzvzkWuuZkd4AqbvKO7UQ==", "cpu": [ "arm64" ], @@ -1234,9 +1605,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", - "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.3.tgz", + "integrity": "sha512-2gAPA7P652D3HzR4cLyAuVYwYqjG0mt/3pHSWTCyKZq/N/dJcUAEoNQMyUmwTZWCJRKofB+JPuDVP2aD8w2J6Q==", "cpu": [ "arm64" 
], @@ -1250,9 +1621,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", - "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.3.tgz", + "integrity": "sha512-ODSKvrdMgAJOVU4qElflYy1KSZRM3M45JVbeZu42TINCMG3anp7YCBn80RkISV6bhzKwcUqLBAmOiWkaGtBA9w==", "cpu": [ "x64" ], @@ -1266,9 +1637,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", - "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.3.tgz", + "integrity": "sha512-ZR9kLwCWrlYxwEoytqPi1jhPd1TlsSJWAc+H/CJHmHkf2nD92MQpSRIURR1iNgA/kuFSdxB8xIPt4p/T78kwsg==", "cpu": [ "x64" ], @@ -1282,9 +1653,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", - "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.3.tgz", + "integrity": "sha512-+G2FrDcfm2YDbhDiObDU/qPriWeiz/9cRR0yMWJeTLGGX6/x8oryO3tt7HhodA1vZ8r2ddJPCjtLcpaVl7TE2Q==", "cpu": [ "arm64" ], @@ -1297,26 +1668,10 @@ "node": ">= 10" } }, - "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", - 
"integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", - "cpu": [ - "ia32" - ], - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", - "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.3.tgz", + "integrity": "sha512-gHYS9tc+G2W0ZC8rBL+H6RdtXIyk40uLiaos0yj5US85FNhbFEndMA2nW3z47nzOWiSvXTZ5kBClc3rD0zJg0w==", "cpu": [ "x64" ], @@ -1435,13 +1790,12 @@ "license": "Apache-2.0" }, "node_modules/@swc/helpers": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", - "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", "license": "Apache-2.0", "dependencies": { - "@swc/counter": "^0.1.3", - "tslib": "^2.4.0" + "tslib": "^2.8.0" } }, "node_modules/@ts-morph/common": { @@ -2669,7 +3023,8 @@ "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" }, "node_modules/cliui": { "version": "8.0.1", @@ -2720,11 +3075,25 @@ "integrity": 
"sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", "dev": true }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, + "devOptional": true, "dependencies": { "color-name": "~1.1.4" }, @@ -2736,7 +3105,18 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "devOptional": true + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } }, "node_modules/color-support": { "version": "1.1.3", @@ -2918,10 +3298,11 @@ } }, "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", - "dev": true, + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": 
"sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "devOptional": true, + "license": "Apache-2.0", "engines": { "node": ">=8" } @@ -3810,7 +4191,8 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true }, "node_modules/has-flag": { "version": "4.0.0", @@ -5079,41 +5461,42 @@ "dev": true }, "node_modules/next": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", - "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/next/-/next-15.2.3.tgz", + "integrity": "sha512-x6eDkZxk2rPpu46E1ZVUWIBhYCLszmUY6fvHBFcbzJ9dD+qRX6vcHusaqqDlnY+VngKzKbAiG2iRCkPbmi8f7w==", "license": "MIT", "dependencies": { - "@next/env": "14.2.25", - "@swc/helpers": "0.5.5", + "@next/env": "15.2.3", + "@swc/counter": "0.1.3", + "@swc/helpers": "0.5.15", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", - "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.1" + "styled-jsx": "5.1.6" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": ">=18.17.0" + "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.25", - "@next/swc-darwin-x64": "14.2.25", - "@next/swc-linux-arm64-gnu": "14.2.25", - "@next/swc-linux-arm64-musl": "14.2.25", - "@next/swc-linux-x64-gnu": "14.2.25", - "@next/swc-linux-x64-musl": "14.2.25", - "@next/swc-win32-arm64-msvc": "14.2.25", - "@next/swc-win32-ia32-msvc": "14.2.25", - "@next/swc-win32-x64-msvc": "14.2.25" + "@next/swc-darwin-arm64": "15.2.3", + 
"@next/swc-darwin-x64": "15.2.3", + "@next/swc-linux-arm64-gnu": "15.2.3", + "@next/swc-linux-arm64-musl": "15.2.3", + "@next/swc-linux-x64-gnu": "15.2.3", + "@next/swc-linux-x64-musl": "15.2.3", + "@next/swc-win32-arm64-msvc": "15.2.3", + "@next/swc-win32-x64-msvc": "15.2.3", + "sharp": "^0.33.5" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.41.2", - "react": "^18.2.0", - "react-dom": "^18.2.0", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "sass": "^1.3.0" }, "peerDependenciesMeta": { @@ -5123,6 +5506,9 @@ "@playwright/test": { "optional": true }, + "babel-plugin-react-compiler": { + "optional": true + }, "sass": { "optional": true } @@ -5815,6 +6201,59 @@ "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", "dev": true }, + "node_modules/sharp": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz", + "integrity": "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.3", + "semver": "^7.6.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.33.5", + "@img/sharp-darwin-x64": "0.33.5", + "@img/sharp-libvips-darwin-arm64": "1.0.4", + "@img/sharp-libvips-darwin-x64": "1.0.4", + "@img/sharp-libvips-linux-arm": "1.0.5", + "@img/sharp-libvips-linux-arm64": "1.0.4", + "@img/sharp-libvips-linux-s390x": "1.0.4", + "@img/sharp-libvips-linux-x64": "1.0.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", + "@img/sharp-libvips-linuxmusl-x64": "1.0.4", + "@img/sharp-linux-arm": 
"0.33.5", + "@img/sharp-linux-arm64": "0.33.5", + "@img/sharp-linux-s390x": "0.33.5", + "@img/sharp-linux-x64": "0.33.5", + "@img/sharp-linuxmusl-arm64": "0.33.5", + "@img/sharp-linuxmusl-x64": "0.33.5", + "@img/sharp-wasm32": "0.33.5", + "@img/sharp-win32-ia32": "0.33.5", + "@img/sharp-win32-x64": "0.33.5" + } + }, + "node_modules/sharp/node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -5848,6 +6287,23 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", + "optional": true, + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/simple-swizzle/node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "license": "MIT", + "optional": true + }, "node_modules/sisteransi": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", @@ -6079,9 +6535,10 @@ } }, "node_modules/styled-jsx": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", - "integrity": 
"sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", "dependencies": { "client-only": "0.0.1" }, @@ -6089,7 +6546,7 @@ "node": ">= 12.0.0" }, "peerDependencies": { - "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" }, "peerDependenciesMeta": { "@babel/core": { @@ -6386,9 +6843,10 @@ "dev": true }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" }, "node_modules/type-detect": { "version": "4.0.8", diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 5a8fea816..420bca941 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "^14.2.25", + "next": "^15.2.3", "react": "18.2.0", "react-dom": "18.2.0" }, From 23fd3ffef3b19656b27576b4d0c613d19ea1ae2f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:58:50 +0000 Subject: [PATCH 174/246] fix(client): remove duplicate types (#1410) --- src/resources/shared.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 2c0fb1c32..3e8ded763 100644 --- 
a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,9 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export type AllModels = - | string - | ChatModel - | string + | (string & {}) | ChatModel | 'o1-pro' | 'o1-pro-2025-03-19' From 16e21df0a4d8903ae119a5f7445eafc5031c82a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:49:50 -0400 Subject: [PATCH 175/246] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index abb937131..2df281d34 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml From f395e9584ac63780442bb54c2d292914eaecf3c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:11:17 +0000 Subject: [PATCH 176/246] fix: avoid type error in certain environments (#1413) --- src/core.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index a41eaa3fa..1e1cb0a4a 100644 --- a/src/core.ts +++ b/src/core.ts @@ -430,7 +430,7 @@ export abstract class APIClient { !headers ? {} : Symbol.iterator in headers ? 
Object.fromEntries(Array.from(headers as Iterable).map((header) => [...header])) - : { ...headers } + : { ...(headers as any as Record) } ); } From 06c03d7125d8331679dd206d0e34705d65669046 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 19:08:04 +0000 Subject: [PATCH 177/246] fix(exports): add missing type exports (#1417) --- src/resources/beta/beta.ts | 106 +++++++++++++++++++++++- src/resources/beta/realtime/realtime.ts | 52 ++++++++++++ src/resources/responses/responses.ts | 83 +++++++++++++++++++ 3 files changed, 239 insertions(+), 2 deletions(-) diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 0b909de18..6282d4593 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -22,7 +22,58 @@ import { ThreadStreamEvent, } from './assistants'; import * as RealtimeAPI from './realtime/realtime'; -import { Realtime } from './realtime/realtime'; +import { + ConversationCreatedEvent, + ConversationItem, + ConversationItemContent, + ConversationItemCreateEvent, + ConversationItemCreatedEvent, + ConversationItemDeleteEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionDeltaEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemRetrieveEvent, + ConversationItemTruncateEvent, + ConversationItemTruncatedEvent, + ConversationItemWithReference, + ErrorEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommitEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + Realtime, + RealtimeClientEvent, + RealtimeResponse, + RealtimeResponseStatus, + RealtimeResponseUsage, + RealtimeServerEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + 
ResponseAudioTranscriptDoneEvent, + ResponseCancelEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreateEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdateEvent, + SessionUpdatedEvent, + TranscriptionSessionUpdate, + TranscriptionSessionUpdatedEvent, +} from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; import { AssistantResponseFormatOption, @@ -55,7 +106,58 @@ Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; export declare namespace Beta { - export { Realtime as Realtime }; + export { + Realtime as Realtime, + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type 
InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as 
TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; export { Chat }; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index d0a74840b..224d94f37 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2451,6 +2451,58 @@ Realtime.Sessions = Sessions; Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { + export { + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as 
InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; + export { Sessions as Sessions, type SessionsAPISession as Session, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index b90d415bd..706d66730 100644 --- 
a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2865,6 +2865,89 @@ export interface ResponseRetrieveParams { Responses.InputItems = InputItems; export declare namespace Responses { + export { + type ComputerTool as ComputerTool, + type EasyInputMessage as EasyInputMessage, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type Response as Response, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent, + type ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, + type ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent, + type ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent, + type ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, + type ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall, + type ResponseCompletedEvent as ResponseCompletedEvent, + type ResponseComputerToolCall as ResponseComputerToolCall, + type ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, + type ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot, + type ResponseContent as ResponseContent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseError as ResponseError, + type ResponseErrorEvent as ResponseErrorEvent, + type ResponseFailedEvent as ResponseFailedEvent, + type ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, 
+ type ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, + type ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, + type ResponseFileSearchToolCall as ResponseFileSearchToolCall, + type ResponseFormatTextConfig as ResponseFormatTextConfig, + type ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseFunctionToolCall as ResponseFunctionToolCall, + type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem, + type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, + type ResponseFunctionWebSearch as ResponseFunctionWebSearch, + type ResponseInProgressEvent as ResponseInProgressEvent, + type ResponseIncludable as ResponseIncludable, + type ResponseIncompleteEvent as ResponseIncompleteEvent, + type ResponseInput as ResponseInput, + type ResponseInputAudio as ResponseInputAudio, + type ResponseInputContent as ResponseInputContent, + type ResponseInputFile as ResponseInputFile, + type ResponseInputImage as ResponseInputImage, + type ResponseInputItem as ResponseInputItem, + type ResponseInputMessageContentList as ResponseInputMessageContentList, + type ResponseInputMessageItem as ResponseInputMessageItem, + type ResponseInputText as ResponseInputText, + type ResponseItem as ResponseItem, + type ResponseOutputAudio as ResponseOutputAudio, + type ResponseOutputItem as ResponseOutputItem, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseOutputMessage as ResponseOutputMessage, + type ResponseOutputRefusal as ResponseOutputRefusal, + type ResponseOutputText as ResponseOutputText, + type ResponseReasoningItem as ResponseReasoningItem, + type ResponseRefusalDeltaEvent as 
ResponseRefusalDeltaEvent, + type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, + type ResponseStatus as ResponseStatus, + type ResponseStreamEvent as ResponseStreamEvent, + type ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent, + type ResponseTextConfig as ResponseTextConfig, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type ResponseUsage as ResponseUsage, + type ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, + type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, + type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, + type Tool as Tool, + type ToolChoiceFunction as ToolChoiceFunction, + type ToolChoiceOptions as ToolChoiceOptions, + type ToolChoiceTypes as ToolChoiceTypes, + type WebSearchTool as WebSearchTool, + type ResponseCreateParams as ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseRetrieveParams as ResponseRetrieveParams, + }; + export { InputItems as InputItems, type ResponseItemList as ResponseItemList, From 7239db018432c10c4c42962b8b3525011d0d375e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 05:07:24 +0000 Subject: [PATCH 178/246] release: 4.89.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c77dd18b0..05b012220 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.89.0" + ".": "4.89.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4597d6e56..2bd7f344f 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.89.1 (2025-03-26) + +Full Changelog: [v4.89.0...v4.89.1](https://github.com/openai/openai-node/compare/v4.89.0...v4.89.1) + +### Bug Fixes + +* avoid type error in certain environments ([#1413](https://github.com/openai/openai-node/issues/1413)) ([d3f6f8f](https://github.com/openai/openai-node/commit/d3f6f8f9c7511a98cc5795756fee49a30e44d485)) +* **client:** remove duplicate types ([#1410](https://github.com/openai/openai-node/issues/1410)) ([338878b](https://github.com/openai/openai-node/commit/338878bf484dac5a4fadf50592b1f8d1045cd4b6)) +* **exports:** add missing type exports ([#1417](https://github.com/openai/openai-node/issues/1417)) ([2d15ada](https://github.com/openai/openai-node/commit/2d15ada0e0d81a4e0d097dddbe99be2222c4c0ef)) + + +### Chores + +* **internal:** version bump ([#1408](https://github.com/openai/openai-node/issues/1408)) ([9c0949a](https://github.com/openai/openai-node/commit/9c0949a93c3e181d327f820dbc2a4b0ad77258e9)) + ## 4.89.0 (2025-03-20) Full Changelog: [v4.88.0...v4.89.0](https://github.com/openai/openai-node/compare/v4.88.0...v4.89.0) diff --git a/jsr.json b/jsr.json index 3e7fdb744..393ef104b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.89.0", + "version": "4.89.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index a77975fda..29c52a1a5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.89.0", + "version": "4.89.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index dab92ced6..c8c72aa23 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.89.0'; // x-release-please-version +export const VERSION = '4.89.1'; // x-release-please-version From 48921aaabc3456408907e4bcf1cc074a9228c459 
Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 17:33:07 +0000 Subject: [PATCH 179/246] chore: add hash of OpenAPI spec/config inputs to .stats.yml --- .stats.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.stats.yml b/.stats.yml index 2df281d34..fe9320429 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml +openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +config_hash: d36e491b0afc4f79e3afad4b3c9bec70 From 6c93a23b79f335a21c65b52d1192890a5325ed6d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 19:33:55 +0000 Subject: [PATCH 180/246] chore(client): expose headers on some streaming errors (#1423) --- src/streaming.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/streaming.ts b/src/streaming.ts index 25b960314..c9cf2fab8 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -3,6 +3,7 @@ import { OpenAIError } from './error'; import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; +import { createResponseHeaders } from './core'; import { APIError } from './error'; type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; @@ -53,7 +54,7 @@ export class Stream implements AsyncIterable { } if (data && data.error) { - throw new APIError(undefined, data.error, undefined, undefined); + throw new APIError(undefined, data.error, undefined, createResponseHeaders(response.headers)); } yield data; From fb0e96a7fa8c020ac6109951bb36f9a4ada24d03 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 
20:32:39 +0000 Subject: [PATCH 181/246] chore(api): updates to supported Voice IDs (#1424) --- .stats.yml | 4 +- src/resources/audio/speech.ts | 18 +++++- src/resources/beta/realtime/realtime.ts | 61 +++++++++++++++---- src/resources/beta/realtime/sessions.ts | 47 ++++++++++++-- .../beta/realtime/transcription-sessions.ts | 4 +- src/resources/chat/completions/completions.ts | 14 ++++- src/resources/responses/input-items.ts | 6 ++ src/resources/responses/responses.ts | 20 +++--- tests/api-resources/audio/speech.test.ts | 2 +- .../chat/completions/completions.test.ts | 2 +- .../responses/input-items.test.ts | 2 +- 11 files changed, 143 insertions(+), 37 deletions(-) diff --git a/.stats.yml b/.stats.yml index fe9320429..4d1276a5e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml -openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e config_hash: d36e491b0afc4f79e3afad4b3c9bec70 diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 4324028d5..4b99ee5f4 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -34,11 +34,23 @@ export interface SpeechCreateParams { /** * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - * `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - * voices are available in the + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. Previews of the voices are available in the * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). 
*/ - voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; /** * Control the voice of your generated audio with additional instructions. Does not diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 224d94f37..1c02fdd1a 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1005,9 +1005,22 @@ export interface RealtimeResponse { /** * The voice the model used to respond. Current voice options are `alloy`, `ash`, - * `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** @@ -1620,9 +1633,22 @@ export namespace ResponseCreateEvent { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Response { @@ -2078,9 +2104,22 @@ export namespace SessionUpdateEvent { /** * The voice the model uses to respond. 
Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -2376,7 +2415,7 @@ export namespace TranscriptionSessionUpdate { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. Not available for transcription sessions. */ create_response?: boolean; @@ -2390,7 +2429,7 @@ export namespace TranscriptionSessionUpdate { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. */ interrupt_response?: boolean; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index bae50124e..28a44431e 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -139,7 +139,19 @@ export interface Session { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. 
*/ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -361,7 +373,19 @@ export interface SessionCreateResponse { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateResponse { @@ -561,9 +585,22 @@ export interface SessionCreateParams { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateParams { diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index d749f8502..a54ec1125 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -255,7 +255,7 @@ export namespace TranscriptionSessionCreateParams { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. 
Not available for transcription sessions. */ create_response?: boolean; @@ -269,7 +269,7 @@ export namespace TranscriptionSessionCreateParams { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. */ interrupt_response?: boolean; diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 08bf7f8db..f0ef1d0cc 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -325,7 +325,19 @@ export interface ChatCompletionAudioParam { * The voice the model uses to respond. Supported voices are `alloy`, `ash`, * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. */ - voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index f2292e5c6..c88bb441d 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -71,6 +71,12 @@ export interface InputItemListParams extends CursorPageParams { */ before?: string; + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. + */ + include?: Array; + /** * The order to return the input items in. Default is `asc`. * diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 706d66730..6c9f58b43 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -305,8 +305,8 @@ export interface Response { * context. 
* * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. */ instructions: string | null; @@ -1356,6 +1356,12 @@ export type ResponseFormatTextConfig = * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). */ export interface ResponseFormatTextJSONSchemaConfig { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string; + /** * The schema for the response format, described as a JSON Schema object. Learn how * to build JSON schemas [here](https://json-schema.org/). @@ -1373,12 +1379,6 @@ export interface ResponseFormatTextJSONSchemaConfig { */ description?: string; - /** - * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - * and dashes, with a maximum length of 64. - */ - name?: string; - /** * Whether to enable strict schema adherence when generating the output. If set to * true, the model will always follow the exact schema defined in the `schema` @@ -2698,8 +2698,8 @@ export interface ResponseCreateParamsBase { * context. * * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. 
*/ instructions?: string | null; diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index cbec6cfac..191c6a313 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -13,7 +13,7 @@ describe('resource speech', () => { const response = await client.audio.speech.create({ input: 'input', model: 'string', - voice: 'alloy', + voice: 'ash', instructions: 'instructions', response_format: 'mp3', speed: 0.25, diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index eddf252b1..60c23591a 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -27,7 +27,7 @@ describe('resource completions', () => { const response = await client.chat.completions.create({ messages: [{ content: 'string', role: 'developer', name: 'name' }], model: 'gpt-4o', - audio: { format: 'wav', voice: 'alloy' }, + audio: { format: 'wav', voice: 'ash' }, frequency_penalty: -2, function_call: 'none', functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts index 51b86f1b3..25ab166c0 100644 --- a/tests/api-resources/responses/input-items.test.ts +++ b/tests/api-resources/responses/input-items.test.ts @@ -32,7 +32,7 @@ describe('resource inputItems', () => { await expect( client.responses.inputItems.list( 'response_id', - { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { after: 'after', before: 'before', include: ['file_search_call.results'], limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); From 16c67be29bbb976660ac01d76f54e8735e71c1e0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: 
Thu, 27 Mar 2025 16:07:24 +0000 Subject: [PATCH 182/246] feat(api): add `get /chat/completions` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4d1276a5e..1e1104a06 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: d36e491b0afc4f79e3afad4b3c9bec70 +config_hash: 9351ea829c2b41da3b48a38c934c92ee From 9c7d352181c690156e26c9538c00edff6db5b384 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 27 Mar 2025 16:56:35 +0000 Subject: [PATCH 183/246] fix(audio): correctly handle transcription streaming --- src/resources/audio/transcriptions.ts | 7 ++++++- src/streaming.ts | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 7f797c709..ba4fec6c5 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -40,7 +40,12 @@ export class Transcriptions extends APIResource { ): Core.APIPromise> { return this._client.post( '/audio/transcriptions', - Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + Core.multipartFormRequestOptions({ + body, + ...options, + stream: body.stream ?? 
false, + __metadata: { model: body.model }, + }), ); } } diff --git a/src/streaming.ts b/src/streaming.ts index c9cf2fab8..ee25daca6 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -42,7 +42,11 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null || sse.event.startsWith('response.')) { + if ( + sse.event === null || + sse.event.startsWith('response.') || + sse.event.startsWith('transcript.') + ) { let data; try { From 84edc62d05eddaefee0973f9687fcfdd43b0afa9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 17:19:39 +0000 Subject: [PATCH 184/246] fix(internal): work around https://github.com/vercel/next.js/issues/76881 (#1427) --- src/_shims/index-deno.ts | 2 ++ src/_shims/index.d.ts | 2 ++ src/_shims/index.js | 6 +++++- src/_shims/index.mjs | 6 +++++- src/core.ts | 5 +++++ 5 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts index d9eabb5a9..e83c7a6d3 100644 --- a/src/_shims/index-deno.ts +++ b/src/_shims/index-deno.ts @@ -108,3 +108,5 @@ export declare class FsReadStream extends Readable { const _ReadableStream = ReadableStream; type _ReadableStream = ReadableStream; export { _ReadableStream as ReadableStream }; + +export const init = () => {}; diff --git a/src/_shims/index.d.ts b/src/_shims/index.d.ts index d867b293b..107cf7fd6 100644 --- a/src/_shims/index.d.ts +++ b/src/_shims/index.d.ts @@ -79,3 +79,5 @@ export function fileFromPath(path: string, options?: FileFromPathOptions): Promi export function fileFromPath(path: string, filename?: string, options?: FileFromPathOptions): Promise; export function isFsReadStream(value: any): value is FsReadStream; + +export const init: () => void; diff --git a/src/_shims/index.js b/src/_shims/index.js index b5fc8229e..959f2b9ce 100644 --- a/src/_shims/index.js +++ b/src/_shims/index.js @@ -3,7 +3,9 @@ */ const shims = require('./registry'); 
const auto = require('openai/_shims/auto/runtime'); -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +exports.init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; for (const property of Object.keys(shims)) { Object.defineProperty(exports, property, { get() { @@ -11,3 +13,5 @@ for (const property of Object.keys(shims)) { }, }); } + +exports.init(); diff --git a/src/_shims/index.mjs b/src/_shims/index.mjs index 81665e610..26d7a716c 100644 --- a/src/_shims/index.mjs +++ b/src/_shims/index.mjs @@ -3,5 +3,9 @@ */ import * as shims from './registry.mjs'; import * as auto from 'openai/_shims/auto/runtime'; -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +export const init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; export * from './registry.mjs'; + +init(); diff --git a/src/core.ts b/src/core.ts index 1e1cb0a4a..0dedc53eb 100644 --- a/src/core.ts +++ b/src/core.ts @@ -17,7 +17,12 @@ import { type RequestInit, type Response, type HeadersInit, + init, } from './_shims/index'; + +// try running side effects outside of _shims/index to workaround https://github.com/vercel/next.js/issues/76881 +init(); + export { type Response }; import { BlobLike, isBlobLike, isMultipartBody } from './uploads'; export { From 8ab47e2b569216d5c67a95e5512a3b09cbc7d261 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 17:20:08 +0000 Subject: [PATCH 185/246] release: 4.90.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 05b012220..7b04494d6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.89.1" + ".": "4.90.0" } diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 2bd7f344f..89523001a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 4.90.0 (2025-03-27) + +Full Changelog: [v4.89.1...v4.90.0](https://github.com/openai/openai-node/compare/v4.89.1...v4.90.0) + +### Features + +* **api:** add `get /chat/completions` endpoint ([2d6710a](https://github.com/openai/openai-node/commit/2d6710a1f9dd4f768d9c73e9c9f5f93c737cdc66)) + + +### Bug Fixes + +* **audio:** correctly handle transcription streaming ([2a9b603](https://github.com/openai/openai-node/commit/2a9b60336cd40a4d4fb9b898ece49170ad648fd0)) +* **internal:** work around https://github.com/vercel/next.js/issues/76881 ([#1427](https://github.com/openai/openai-node/issues/1427)) ([b467e94](https://github.com/openai/openai-node/commit/b467e949476621e8e92587a83c9de6fab35b2b9d)) + + +### Chores + +* add hash of OpenAPI spec/config inputs to .stats.yml ([45db35e](https://github.com/openai/openai-node/commit/45db35e34be560c75bf36224cc153c6d0e6e2a88)) +* **api:** updates to supported Voice IDs ([#1424](https://github.com/openai/openai-node/issues/1424)) ([404f4db](https://github.com/openai/openai-node/commit/404f4db41a2ee651f5bfdaa7b8881e1bf015f058)) +* **client:** expose headers on some streaming errors ([#1423](https://github.com/openai/openai-node/issues/1423)) ([b0783cc](https://github.com/openai/openai-node/commit/b0783cc6221b68f1738e759b393756a7d0e540a3)) + ## 4.89.1 (2025-03-26) Full Changelog: [v4.89.0...v4.89.1](https://github.com/openai/openai-node/compare/v4.89.0...v4.89.1) diff --git a/jsr.json b/jsr.json index 393ef104b..98c8e6959 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.89.1", + "version": "4.90.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 29c52a1a5..408e50a73 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.89.1", + "version": 
"4.90.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c8c72aa23..03d899bdd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.89.1'; // x-release-please-version +export const VERSION = '4.90.0'; // x-release-please-version From 85175d9644348e61d9779ce0cb15cfef4aa87a1d Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Fri, 28 Mar 2025 09:44:45 -0700 Subject: [PATCH 186/246] update --- ecosystem-tests/vercel-edge/package-lock.json | 625 +++--------------- ecosystem-tests/vercel-edge/package.json | 2 +- 2 files changed, 79 insertions(+), 548 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 770dc460a..aaca4370c 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "^15.2.3", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -777,16 +777,6 @@ "node": ">=16" } }, - "node_modules/@emnapi/runtime": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz", - "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==", - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -802,367 +792,6 @@ "@hapi/hoek": "^9.0.0" } }, - "node_modules/@img/sharp-darwin-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", - "integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==", - "cpu": [ - "arm64" 
- ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-darwin-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-darwin-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz", - "integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-darwin-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-libvips-darwin-arm64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz", - "integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "darwin" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-darwin-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz", - "integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "darwin" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-arm": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz", - "integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==", - "cpu": [ - "arm" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-arm64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz", - "integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-s390x": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz", - "integrity": "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==", - "cpu": [ - "s390x" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz", - "integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linuxmusl-arm64": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz", - "integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linuxmusl-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz", - "integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-linux-arm": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz", - "integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==", - "cpu": [ - "arm" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-arm": "1.0.5" - } - }, - "node_modules/@img/sharp-linux-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz", - "integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==", - "cpu": [ - "arm64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": 
"https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-linux-s390x": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz", - "integrity": "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==", - "cpu": [ - "s390x" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-s390x": "1.0.4" - } - }, - "node_modules/@img/sharp-linux-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz", - "integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-linuxmusl-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz", - "integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==", - "cpu": [ - "arm64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-linuxmusl-x64": { - 
"version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz", - "integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linuxmusl-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-wasm32": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz", - "integrity": "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==", - "cpu": [ - "wasm32" - ], - "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", - "optional": true, - "dependencies": { - "@emnapi/runtime": "^1.2.0" - }, - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-win32-ia32": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz", - "integrity": "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==", - "cpu": [ - "ia32" - ], - "license": "Apache-2.0 AND LGPL-3.0-or-later", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-win32-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", - "integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==", - "cpu": [ - "x64" - ], - 
"license": "Apache-2.0 AND LGPL-3.0-or-later", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", @@ -1551,19 +1180,17 @@ } }, "node_modules/@next/env": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.3.tgz", - "integrity": "sha512-a26KnbW9DFEUsSxAxKBORR/uD9THoYoKbkpFywMN/AFvboTt94b8+g/07T8J6ACsdLag8/PDU60ov4rPxRAixw==", - "license": "MIT" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==" }, "node_modules/@next/swc-darwin-arm64": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.3.tgz", - "integrity": "sha512-uaBhA8aLbXLqwjnsHSkxs353WrRgQgiFjduDpc7YXEU0B54IKx3vU+cxQlYwPCyC8uYEEX7THhtQQsfHnvv8dw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1573,13 +1200,12 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.3.tgz", - "integrity": "sha512-pVwKvJ4Zk7h+4hwhqOUuMx7Ib02u3gDX3HXPKIShBi9JlYllI0nU6TWLbPT94dt7FSi6mSBhfc2JrHViwqbOdw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": 
"sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1589,13 +1215,12 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.3.tgz", - "integrity": "sha512-50ibWdn2RuFFkOEUmo9NCcQbbV9ViQOrUfG48zHBCONciHjaUKtHcYFiCwBVuzD08fzvzkWuuZkd4AqbvKO7UQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1605,13 +1230,12 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.3.tgz", - "integrity": "sha512-2gAPA7P652D3HzR4cLyAuVYwYqjG0mt/3pHSWTCyKZq/N/dJcUAEoNQMyUmwTZWCJRKofB+JPuDVP2aD8w2J6Q==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1621,13 +1245,12 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.3.tgz", - "integrity": "sha512-ODSKvrdMgAJOVU4qElflYy1KSZRM3M45JVbeZu42TINCMG3anp7YCBn80RkISV6bhzKwcUqLBAmOiWkaGtBA9w==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": 
"sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1637,13 +1260,12 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.3.tgz", - "integrity": "sha512-ZR9kLwCWrlYxwEoytqPi1jhPd1TlsSJWAc+H/CJHmHkf2nD92MQpSRIURR1iNgA/kuFSdxB8xIPt4p/T78kwsg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1653,13 +1275,27 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.3.tgz", - "integrity": "sha512-+G2FrDcfm2YDbhDiObDU/qPriWeiz/9cRR0yMWJeTLGGX6/x8oryO3tt7HhodA1vZ8r2ddJPCjtLcpaVl7TE2Q==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], - "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", + "cpu": [ + "ia32" + ], "optional": true, "os": [ "win32" @@ -1669,13 +1305,12 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "15.2.3", - "resolved": 
"https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.3.tgz", - "integrity": "sha512-gHYS9tc+G2W0ZC8rBL+H6RdtXIyk40uLiaos0yj5US85FNhbFEndMA2nW3z47nzOWiSvXTZ5kBClc3rD0zJg0w==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "win32" @@ -1786,16 +1421,15 @@ "node_modules/@swc/counter": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", - "license": "Apache-2.0" + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" }, "node_modules/@swc/helpers": { - "version": "0.5.15", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", - "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", - "license": "Apache-2.0", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "dependencies": { - "tslib": "^2.8.0" + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" } }, "node_modules/@ts-morph/common": { @@ -3023,8 +2657,7 @@ "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", - "license": "MIT" + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" }, 
"node_modules/cliui": { "version": "8.0.1", @@ -3075,25 +2708,11 @@ "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", "dev": true }, - "node_modules/color": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", - "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", - "license": "MIT", - "optional": true, - "dependencies": { - "color-convert": "^2.0.1", - "color-string": "^1.9.0" - }, - "engines": { - "node": ">=12.5.0" - } - }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "devOptional": true, + "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -3105,18 +2724,7 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "devOptional": true - }, - "node_modules/color-string": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", - "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", - "license": "MIT", - "optional": true, - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } + "dev": true }, "node_modules/color-support": { "version": "1.1.3", @@ -3301,7 +2909,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", - "devOptional": true, + "dev": true, "license": "Apache-2.0", "engines": { "node": ">=8" @@ -4191,8 
+3799,7 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/has-flag": { "version": "4.0.0", @@ -5461,42 +5068,40 @@ "dev": true }, "node_modules/next": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/next/-/next-15.2.3.tgz", - "integrity": "sha512-x6eDkZxk2rPpu46E1ZVUWIBhYCLszmUY6fvHBFcbzJ9dD+qRX6vcHusaqqDlnY+VngKzKbAiG2iRCkPbmi8f7w==", - "license": "MIT", - "dependencies": { - "@next/env": "15.2.3", - "@swc/counter": "0.1.3", - "@swc/helpers": "0.5.15", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "dependencies": { + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.6" + "styled-jsx": "5.1.1" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" + "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "15.2.3", - "@next/swc-darwin-x64": "15.2.3", - "@next/swc-linux-arm64-gnu": "15.2.3", - "@next/swc-linux-arm64-musl": "15.2.3", - "@next/swc-linux-x64-gnu": "15.2.3", - "@next/swc-linux-x64-musl": "15.2.3", - "@next/swc-win32-arm64-msvc": "15.2.3", - "@next/swc-win32-x64-msvc": "15.2.3", - "sharp": "^0.33.5" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": 
"14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.41.2", - "babel-plugin-react-compiler": "*", - "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", - "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", "sass": "^1.3.0" }, "peerDependenciesMeta": { @@ -5506,9 +5111,6 @@ "@playwright/test": { "optional": true }, - "babel-plugin-react-compiler": { - "optional": true - }, "sass": { "optional": true } @@ -6201,59 +5803,6 @@ "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", "dev": true }, - "node_modules/sharp": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz", - "integrity": "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==", - "hasInstallScript": true, - "license": "Apache-2.0", - "optional": true, - "dependencies": { - "color": "^4.2.3", - "detect-libc": "^2.0.3", - "semver": "^7.6.3" - }, - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-darwin-arm64": "0.33.5", - "@img/sharp-darwin-x64": "0.33.5", - "@img/sharp-libvips-darwin-arm64": "1.0.4", - "@img/sharp-libvips-darwin-x64": "1.0.4", - "@img/sharp-libvips-linux-arm": "1.0.5", - "@img/sharp-libvips-linux-arm64": "1.0.4", - "@img/sharp-libvips-linux-s390x": "1.0.4", - "@img/sharp-libvips-linux-x64": "1.0.4", - "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", - "@img/sharp-libvips-linuxmusl-x64": "1.0.4", - "@img/sharp-linux-arm": "0.33.5", - "@img/sharp-linux-arm64": "0.33.5", - "@img/sharp-linux-s390x": "0.33.5", - "@img/sharp-linux-x64": "0.33.5", - "@img/sharp-linuxmusl-arm64": "0.33.5", - 
"@img/sharp-linuxmusl-x64": "0.33.5", - "@img/sharp-wasm32": "0.33.5", - "@img/sharp-win32-ia32": "0.33.5", - "@img/sharp-win32-x64": "0.33.5" - } - }, - "node_modules/sharp/node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", - "license": "ISC", - "optional": true, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -6287,23 +5836,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", - "license": "MIT", - "optional": true, - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", - "license": "MIT", - "optional": true - }, "node_modules/sisteransi": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", @@ -6535,10 +6067,9 @@ } }, "node_modules/styled-jsx": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", - "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", - "license": "MIT", + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", "dependencies": { "client-only": "0.0.1" }, @@ -6546,7 +6077,7 @@ "node": ">= 12.0.0" }, "peerDependencies": { - "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" }, "peerDependenciesMeta": { "@babel/core": { diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 420bca941..5a8fea816 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "^15.2.3", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, From 3676d34127cd88a67dde6e6d24f2b6a7b65d3073 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 19:41:56 +0000 Subject: [PATCH 187/246] feat(api): add `get /responses/{response_id}/input_items` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1e1104a06..f6a90d243 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 9351ea829c2b41da3b48a38c934c92ee +config_hash: e25e31d8446b6bc0e3ef7103b6993cce From be00d29fadb2b78920bcae1e6e72750bc6f973a4 Mon Sep 17 00:00:00 2001 From: Wassim Chegham Date: Fri, 28 Mar 2025 21:46:46 +0100 Subject: [PATCH 188/246] perf(embedding): default embedding creation to base64 (#1312) * perf(embedding): always request embedding creation as base64 Requesting base64 encoded embeddings returns smaller body sizes, on average ~60% smaller than 
float32 encoded. In other words, the size of the response body containing embeddings in float32 is ~2.3x bigger than base64 encoded embedding. We always request embedding creating encoded as base64, and then decoded them to float32 based on the user's provided encoding_format parameter. Closes #1310 Co-authored-by: Robert Craigie --- src/core.ts | 21 +++++++++++++ src/resources/embeddings.ts | 42 ++++++++++++++++++++++++-- tests/api-resources/embeddings.test.ts | 31 +++++++++++++++++++ 3 files changed, 92 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 0dedc53eb..a3f664906 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1287,6 +1287,27 @@ export const toBase64 = (str: string | null | undefined): string => { throw new OpenAIError('Cannot generate b64 string; Expected `Buffer` or `btoa` to be defined'); }; +/** + * Converts a Base64 encoded string to a Float32Array. + * @param base64Str - The Base64 encoded string. + * @returns An Array of numbers interpreted as Float32 values. 
+ */ +export const toFloat32Array = (base64Str: string): Array => { + if (typeof Buffer !== 'undefined') { + // for Node.js environment + return Array.from(new Float32Array(Buffer.from(base64Str, 'base64').buffer)); + } else { + // for legacy web platform APIs + const binaryStr = atob(base64Str); + const len = binaryStr.length; + const bytes = new Uint8Array(len); + for (let i = 0; i < len; i++) { + bytes[i] = binaryStr.charCodeAt(i); + } + return Array.from(new Float32Array(bytes.buffer)); + } +}; + export function isObj(obj: unknown): obj is Record { return obj != null && typeof obj === 'object' && !Array.isArray(obj); } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index d01ffc807..a4be9ca3c 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -9,9 +9,47 @@ export class Embeddings extends APIResource { */ create( body: EmbeddingCreateParams, - options?: Core.RequestOptions, + options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/embeddings', { body, ...options }); + const hasUserProvidedEncodingFormat = !!body.encoding_format; + // No encoding_format specified, defaulting to base64 for performance reasons + // See https://github.com/openai/openai-node/pull/1312 + let encoding_format: EmbeddingCreateParams['encoding_format'] = + hasUserProvidedEncodingFormat ? 
body.encoding_format : 'base64'; + + if (hasUserProvidedEncodingFormat) { + Core.debug('Request', 'User defined encoding_format:', body.encoding_format); + } + + const response: Core.APIPromise = this._client.post('/embeddings', { + body: { + ...body, + encoding_format: encoding_format as EmbeddingCreateParams['encoding_format'], + }, + ...options, + }); + + // if the user specified an encoding_format, return the response as-is + if (hasUserProvidedEncodingFormat) { + return response; + } + + // in this stage, we are sure the user did not specify an encoding_format + // and we defaulted to base64 for performance reasons + // we are sure then that the response is base64 encoded, let's decode it + // the returned result will be a float32 array since this is OpenAI API's default encoding + Core.debug('response', 'Decoding base64 embeddings to float32 array'); + + return (response as Core.APIPromise)._thenUnwrap((response) => { + if (response && response.data) { + response.data.forEach((embeddingBase64Obj) => { + const embeddingBase64Str = embeddingBase64Obj.embedding as unknown as string; + embeddingBase64Obj.embedding = Core.toFloat32Array(embeddingBase64Str); + }); + } + + return response; + }); } } diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index 46dd1b2a3..e226ade9e 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -32,4 +32,35 @@ describe('resource embeddings', () => { user: 'user-1234', }); }); + + test('create: encoding_format=float should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); + + test('create: encoding_format=base64 should create float32 embeddings', async () => { 
+ const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + encoding_format: 'base64', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); + + test('create: encoding_format=default should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); }); From ca69782d2eb83e01d56ea81637133caadddef786 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 14:58:34 +0000 Subject: [PATCH 189/246] release: 4.91.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7b04494d6..f6df5bd5c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.90.0" + ".": "4.91.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 89523001a..8cf3201bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.91.0 (2025-03-31) + +Full Changelog: [v4.90.0...v4.91.0](https://github.com/openai/openai-node/compare/v4.90.0...v4.91.0) + +### Features + +* **api:** add `get /responses/{response_id}/input_items` endpoint ([ef0e0ac](https://github.com/openai/openai-node/commit/ef0e0acd469379ae6f2745c83e6c6813ff7b4edc)) + + +### Performance Improvements + +* **embedding:** default embedding creation to base64 ([#1312](https://github.com/openai/openai-node/issues/1312)) 
([e54530e](https://github.com/openai/openai-node/commit/e54530e4f6f00d7d74fc8636bbdb6f6280548750)), closes [#1310](https://github.com/openai/openai-node/issues/1310) + ## 4.90.0 (2025-03-27) Full Changelog: [v4.89.1...v4.90.0](https://github.com/openai/openai-node/compare/v4.89.1...v4.90.0) diff --git a/jsr.json b/jsr.json index 98c8e6959..4595ab4b7 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.90.0", + "version": "4.91.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 408e50a73..089656265 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.90.0", + "version": "4.91.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 03d899bdd..0095d88c8 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.90.0'; // x-release-please-version +export const VERSION = '4.91.0'; // x-release-please-version From 71950f6e891ba0813c25b2992db93a61ef6c9664 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 00:26:27 +0000 Subject: [PATCH 190/246] chore: Remove deprecated/unused remote spec feature --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index f6a90d243..2ccfd3411 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: e25e31d8446b6bc0e3ef7103b6993cce +config_hash: 2daae06cc598821ccf87201de0861e40 From 1847673de09586c809e1057a6b08c604471e13ff Mon Sep 17 00:00:00 2001 From: stainless-bot 
Date: Tue, 1 Apr 2025 14:36:41 -0400 Subject: [PATCH 191/246] fix(docs): correct docstring on responses.stream --- src/resources/responses/responses.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 6c9f58b43..a46c4182c 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -128,7 +128,7 @@ export class Responses extends APIResource { } /** - * Creates a chat completion stream + * Creates a model response stream */ stream>( body: Params, From e080e12cd3ab75ddc843746e7baa8fbf38f8a031 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 18:37:12 +0000 Subject: [PATCH 192/246] release: 4.91.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f6df5bd5c..0fdb6f309 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.91.0" + ".": "4.91.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cf3201bb..0de0d9630 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.91.1 (2025-04-01) + +Full Changelog: [v4.91.0...v4.91.1](https://github.com/openai/openai-node/compare/v4.91.0...v4.91.1) + +### Bug Fixes + +* **docs:** correct docstring on responses.stream ([1c8cd6a](https://github.com/openai/openai-node/commit/1c8cd6a638128b0ff5fac89d6c7db256f0b63a85)) + + +### Chores + +* Remove deprecated/unused remote spec feature ([ce3dfa8](https://github.com/openai/openai-node/commit/ce3dfa88bd4d395debccc0e6e1aac6d218b07cb8)) + ## 4.91.0 (2025-03-31) Full Changelog: [v4.90.0...v4.91.0](https://github.com/openai/openai-node/compare/v4.90.0...v4.91.0) diff --git a/jsr.json b/jsr.json index 
4595ab4b7..9bd85f8c9 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.91.0", + "version": "4.91.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 089656265..cfa3e6201 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.91.0", + "version": "4.91.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 0095d88c8..85314d847 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.91.0'; // x-release-please-version +export const VERSION = '4.91.1'; // x-release-please-version From 32afb0022939b19069c37fcd9cabfe666ea86b77 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:12:23 +0000 Subject: [PATCH 193/246] feat(api): manual updates --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2ccfd3411..71ac95541 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 2daae06cc598821ccf87201de0861e40 +config_hash: 31a12443afeef2933b34e2de23c40954 From efce6d3d719ad463b035b22e9a1cf461ab62b5af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:18:37 +0000 Subject: [PATCH 194/246] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 71ac95541..baad2afc1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 31a12443afeef2933b34e2de23c40954 +config_hash: 178ba1bfb1237bf6b94abb3408072aa7 From 5e5e4607a103fcb6257c071bb4bf57902ee6415f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 13:34:13 +0000 Subject: [PATCH 195/246] fix(client): send `X-Stainless-Timeout` in seconds (#1442) --- src/core.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index a3f664906..1858a31e8 100644 --- a/src/core.ts +++ b/src/core.ts @@ -406,7 +406,7 @@ export abstract class APIClient { getHeader(headers, 'x-stainless-timeout') === undefined && options.timeout ) { - reqHeaders['x-stainless-timeout'] = String(options.timeout); + reqHeaders['x-stainless-timeout'] = String(Math.trunc(options.timeout / 1000)); } this.validateHeaders(reqHeaders, headers); From c1c281983e23dcfdca964720265d3cba28b17795 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 14:49:58 +0000 Subject: [PATCH 196/246] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index baad2afc1..675edb075 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 178ba1bfb1237bf6b94abb3408072aa7 +config_hash: 
578c5bff4208d560c0c280f13324409f From 1cb66b6ccbcecaa6e48b90d37d8cac4840bb69a4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 20:55:24 +0000 Subject: [PATCH 197/246] chore(internal): add aliases for Record and Array (#1443) --- src/core.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/core.ts b/src/core.ts index 1858a31e8..ee445daf1 100644 --- a/src/core.ts +++ b/src/core.ts @@ -34,6 +34,20 @@ export { export type Fetch = (url: RequestInfo, init?: RequestInit) => Promise; +/** + * An alias to the builtin `Array` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Array = Array; + +/** + * An alias to the builtin `Record` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Record = Record; + +export type { _Array as Array, _Record as Record }; + type PromiseOrValue = T | Promise; type APIResponseProps = { From 4af79ddd5b19925fa09d9ae877470aa8304535c2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:22:30 +0000 Subject: [PATCH 198/246] fix(api): improve type resolution when importing as a package (#1444) --- packages/mcp-server/src/tools.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts new file mode 100644 index 000000000..7e516de7c --- /dev/null +++ b/packages/mcp-server/src/tools.ts @@ -0,0 +1 @@ +export * from './tools/index'; From b893d81420359c712dab6997c2dbc9f309549712 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:37:52 +0000 Subject: [PATCH 199/246] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml 
index 675edb075..aebb90c8c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 578c5bff4208d560c0c280f13324409f +config_hash: bcd2cacdcb9fae9938f273cd167f613c From 4ba994773b41a3ed05a3ad908b235fc5f3810dfc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 21:09:16 +0000 Subject: [PATCH 200/246] fix(mcp): remove unused tools.ts (#1445) --- packages/mcp-server/src/tools.ts | 1 - 1 file changed, 1 deletion(-) delete mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts deleted file mode 100644 index 7e516de7c..000000000 --- a/packages/mcp-server/src/tools.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './tools/index'; From d6b99c8fcbd35ef6b45d66f487aea759c01febbc Mon Sep 17 00:00:00 2001 From: Richard Dzenis Date: Mon, 7 Apr 2025 14:00:53 +0300 Subject: [PATCH 201/246] fix(embeddings): correctly decode base64 data (#1448) * Fix Core.toFloat32Array, Buffer version According to NodeJS docs Buffer.buffer is not guaranteed to correspond exactly to the original Buffer. [1] The previous implementation could use buffer garbage while converting bytes to floats. 
[1] https://nodejs.org/api/buffer.html#bufbuffer * add tests for embeddings data * fix formatting --------- Co-authored-by: Robert Craigie --- src/core.ts | 5 +- .../embeddings-base64-response.json | 1 + .../embeddings-float-response.json | 1 + tests/api-resources/embeddings.test.ts | 57 ++++++++++++++++--- 4 files changed, 55 insertions(+), 9 deletions(-) create mode 100644 tests/api-resources/embeddings-base64-response.json create mode 100644 tests/api-resources/embeddings-float-response.json diff --git a/src/core.ts b/src/core.ts index ee445daf1..ccc677e0e 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1309,7 +1309,10 @@ export const toBase64 = (str: string | null | undefined): string => { export const toFloat32Array = (base64Str: string): Array => { if (typeof Buffer !== 'undefined') { // for Node.js environment - return Array.from(new Float32Array(Buffer.from(base64Str, 'base64').buffer)); + const buf = Buffer.from(base64Str, 'base64'); + return Array.from( + new Float32Array(buf.buffer, buf.byteOffset, buf.length / Float32Array.BYTES_PER_ELEMENT), + ); } else { // for legacy web platform APIs const binaryStr = atob(base64Str); diff --git a/tests/api-resources/embeddings-base64-response.json b/tests/api-resources/embeddings-base64-response.json new file mode 100644 index 000000000..9b0f7629c --- /dev/null +++ b/tests/api-resources/embeddings-base64-response.json @@ -0,0 +1 @@ 
+{"object":"list","data":[{"object":"embedding","index":0,"embedding":"A1fLvaC4Bb0QB7w8yEvrPOm9Xj2r0yA8EW4sPRq75j3Fbiq81/chPumAGb0afqG8R6AFvpzsQT35SPO7Hi39PEMAir1lf0A92McfvRoVlLxQv9o9tHqIvQYlrL0fwlK8sufPPYz2gjzH5Ho93GebvN+eCTxjRjW8PJRKvXMtFD4+n3C9ByMPO39Gkjs1Jm49A1fLPdNXpjv8RLm92McfveKpLz01VNO9SUIevhAHvD0flG09+9srvW5j7Txp8dY8LW4Ju08bJb1GdL29g+aNPWlLBD1p8dY8LkCkvfPLtjxcBj4+1/ehPebv/bz/Ifo8SqkOvREFHzyAr588HbUPPbFS+r00gri825WAPQlcGj1qHZ+8o8EOPo880Tn5dli9zRUSPc2APD0b5RG9mhxEvTyUSj3FQMU95u/9vE20tD3wwBC94NmxvXSUhL3Ofh8904WLPRbeJb2Paja8BClmvhwgOj2e6Ic9em0LPdj1BD3lSau7dJQEPJi107yB6kc97sTKO6lAaD2YDwE9YDuPPSFVC735dtg9SK1IOysJNrwtQkE8BmJxPb2ZXT0hVYs9g+YNvLfuuz2nyhe9z7nHN5UVWDxea5E77F1avTIbyL256oG9ft+hPVWJAbwNoug82TCtvUrm072wgN86JPWGO3TRyTwOY4a8xJwPvkx5DL1f1B68RwkTvja7Q72BrQI9Pfs6PTdfeb3RxG09jJxVvfl22D3eCbQ9FbR6vTPtYrn0mzS+kqGkPDxXhbwyG8i98M9wveayuL1EpL88lNqvve3yL70RQmQ7VcZGPaPBjr1wyEA9fKaWOskMibwNomi8J9Rku9EeGz016Si8O1mivQ38lb0EgxO88P1VvcilmLuNA0a9lj8DvHCceD3lSSs9uFWsve6HBT6XEZ68ShS5PFJSE70dTIK86OvDvSNgsbzS8DU8bPz8PAuVpTxKQIE9/NmOPBhFFj7LsL67PJRKvIxu8LwSqVS8D8yTPSOOlj1g0gG8A+69vYz2AjxPhLK80fLSPbrL/LztWz09LAcZvqfKF73B/JO8lnzIvCk5OLxwMU69dmQCvQtp3bs6hwe9WZKKume4S7x3CLg9zK4hPLsjDT16P6a7MbTXPRp+IT0dtQ89GayGvcngwD2F8bO70R4bu8tFlDxcBr67xAWdvdnWfzzQTIC9zn6fPYSKwz3alx28h8GxPW74wj3eNxk+xUBFvIpjyj0WdRi9AkoIPXhvqLugx+U8F0ezvUlCHjx3NAC9uvlhPEOmXD36oAM9D56uvddgrz2giiC9GhWUvHrWGLv0yRk8fOPbvMc+KLs7//S8v5UjPJUV2D0KLjW6YKa5PDciNDuJznQ9USZLPQ=="}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} \ No newline at end of file diff --git a/tests/api-resources/embeddings-float-response.json b/tests/api-resources/embeddings-float-response.json new file mode 100644 index 000000000..9b5b788e2 --- /dev/null +++ b/tests/api-resources/embeddings-float-response.json @@ -0,0 +1 @@ 
+{"object":"list","data":[{"object":"embedding","index":0,"embedding":[-0.099287055,-0.032646775,0.022952586,0.028722659,0.05438033,0.009816091,0.042097155,0.112661555,-0.010402386,0.158172,-0.037476454,-0.01971345,-0.13049422,0.04734479,-0.0074244705,0.030905303,-0.06738331,0.046996493,-0.039008945,-0.018076468,0.10681021,-0.06664029,-0.08405499,-0.012863665,0.10151614,0.015986703,0.061253335,-0.018970422,0.008399694,-0.011064145,-0.049457774,0.14470463,-0.058745615,0.0021840946,0.00446397,0.058141906,0.099287055,0.0050763874,-0.09046361,-0.039008945,0.042886622,-0.103187956,-0.15454973,0.091810346,0.058002587,-0.041957837,0.028978076,0.02623816,-0.002097021,-0.040309247,-0.09250693,0.06928732,0.03229848,0.02623816,-0.08020054,0.022314047,0.18557113,0.079086,-0.030998182,0.030533789,-0.034829415,0.009705798,0.019492865,0.035084832,-0.122228034,-0.022523023,0.06278583,0.037685428,-0.019423205,0.13941054,0.00039908706,-0.052847836,0.035665322,0.04602127,-0.035618883,-0.04787884,0.049457774,0.096314944,-0.030998182,0.08823452,-0.03534025,-0.086841345,-0.06473628,0.03893929,0.06812634,-0.040495,-0.011133804,-0.22476584,0.045440778,0.06636165,0.03403995,0.032461017,-0.005227315,0.008092035,-0.025843427,0.048807625,0.0061880266,0.05670229,0.031509012,0.06993747,-0.034016732,0.10569567,0.0030620862,-0.011110584,0.011795563,0.058931373,0.054101694,0.068033464,-0.008660915,0.091763906,-0.0370585,0.000023809172,0.013188739,0.004437848,-0.053312227,-0.09770812,-0.06343598,0.07903956,-0.007906278,0.028397584,-0.084565826,-0.103466585,0.0017051902,0.0041185785,0.024636008,-0.016404655,-0.14024645,-0.034295365,-0.009694188,-0.14359008,-0.04778596,0.031903747,0.045649756,-0.06088182,0.058049027,-0.052151248,0.10569567,0.087909445,-0.061206896,-0.00021641403,-0.17637616,0.020096574,-0.016276948,-0.09770812,-0.058792055,-0.09018497,0.023393758,-0.08586612,-0.04295628,0.0034829418,0.048528988,-0.06970527,0.047066152,0.0011493708,-0.01672973,-0.014198792,-0.0034916492,0.037871186,-0.
010309507,-0.079271756,-0.073234655,-0.0090034045,-0.052244127,-0.0046584345,-0.04834323,-0.008010766,0.060696065,0.04181852,-0.08414787,0.13040134,-0.019295497,0.022592682,-0.03596718,-0.015905434,-0.0956648,-0.021652287,0.011104779,0.030882083,0.02021267,0.0631109,0.017437927,0.14674795,-0.005819415,-0.012364443,-0.029349588,-0.012979763,0.072166555,0.07351329,-0.007923692,-0.09273913,0.007993352,-0.021791605,0.1030022,-0.030858863,0.046230245,-0.14944142,-0.0370585,-0.018064858,-0.02447347,-0.011244097,-0.050340116,-0.03183409,-0.006756907,-0.033087946,-0.001057218,-0.012434102,0.089859895,0.009868335,0.034457903,-0.005073485,0.10532416,0.0394269,0.035084832,-0.06575794,0.09417874,-0.005491438,-0.002366949,0.018099686,-0.005799098,-0.07667115,0.0156151885,-0.06264651,0.07787858,0.09547904,-0.009618724,0.086794905,0.095200405,0.14962718,-0.012039368,0.09882267,-0.037221037,0.033273704,-0.0051402412,0.02804929,-0.08753794,0.009659358,-0.031300034,0.01379245,0.053869497,0.03213594,-0.08526241,0.085633926,-0.039194703,-0.018076468,-0.0023321197,0.009386528,-0.026841871,-0.0025672184,-0.02990686,0.009984433,0.105509914,-0.00069114624,0.022662342,0.0027486214,0.05976728,0.04959709]}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} \ No newline at end of file diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index e226ade9e..629265643 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -2,6 +2,9 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; +import { mockFetch } from '../utils/mock-fetch'; +import fs from 'fs/promises'; +import Path from 'path'; const client = new OpenAI({ apiKey: 'My API Key', @@ -33,34 +36,72 @@ describe('resource embeddings', () => { }); }); - test('create: encoding_format=float should create float32 embeddings', async () => { + test('create: encoding_format=default should create float32 embeddings', 
async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', }); expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.09928705543279648); }); - test('create: encoding_format=base64 should create float32 embeddings', async () => { + test('create: encoding_format=float should create float32 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', - encoding_format: 'base64', + encoding_format: 'float', }); expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.099287055); }); - test('create: encoding_format=default should create float32 embeddings', async () => { + test('create: encoding_format=base64 should return base64 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', + encoding_format: 'base64', }); - expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(typeof response.data?.at(0)?.embedding).toBe('string'); }); }); + +function makeClient(): OpenAI { + const { fetch, handleRequest } = mockFetch(); + + handleRequest(async (_, init) => { + const format = (JSON.parse(init!.body as string) as OpenAI.EmbeddingCreateParams).encoding_format; + return new Response( + await fs.readFile( + Path.join( + __dirname, + + // these responses were taken from the live API with: + // + // model: 
'text-embedding-3-large', + // input: 'h', + // dimensions: 256, + + format === 'base64' ? 'embeddings-base64-response.json' : 'embeddings-float-response.json', + ), + ), + { + status: 200, + headers: { + 'Content-Type': 'application/json', + }, + }, + ); + }); + + return new OpenAI({ + fetch, + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', + }); +} From 0d31f406d3c47fb2f3a2a406a53ca28279af5641 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 11:01:19 +0000 Subject: [PATCH 202/246] release: 4.92.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 24 ++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 28 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0fdb6f309..e2b30744d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.91.1" + ".": "4.92.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 0de0d9630..56fe95617 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 4.92.0 (2025-04-07) + +Full Changelog: [v4.91.1...v4.92.0](https://github.com/openai/openai-node/compare/v4.91.1...v4.92.0) + +### Features + +* **api:** manual updates ([891754d](https://github.com/openai/openai-node/commit/891754d7fa42d71ce4f93288dd043ef0b97fee23)) +* **api:** manual updates ([01e5546](https://github.com/openai/openai-node/commit/01e5546f3f48a1f4d645e09e7581f16b30f25bdd)) +* **api:** manual updates ([f38dbf3](https://github.com/openai/openai-node/commit/f38dbf3b39b0800b3bbef5c603a4fa2b616f25d8)) +* **api:** manual updates ([1f12253](https://github.com/openai/openai-node/commit/1f12253054a5a7e35dc03b17901b4c1f33bf5b3d)) + + +### Bug Fixes + +* **api:** improve type resolution when importing as a package 
([#1444](https://github.com/openai/openai-node/issues/1444)) ([4aa46d6](https://github.com/openai/openai-node/commit/4aa46d6c0da681bcdde31fcbb09e8ba6fdaf764b)) +* **client:** send `X-Stainless-Timeout` in seconds ([#1442](https://github.com/openai/openai-node/issues/1442)) ([aa4206c](https://github.com/openai/openai-node/commit/aa4206c7d93b4e3114a697f5467ffbbf5a64d1a8)) +* **embeddings:** correctly decode base64 data ([#1448](https://github.com/openai/openai-node/issues/1448)) ([58128f7](https://github.com/openai/openai-node/commit/58128f7efde73726da740c42adde7b02cdf60a6a)) +* **mcp:** remove unused tools.ts ([#1445](https://github.com/openai/openai-node/issues/1445)) ([520a8fa](https://github.com/openai/openai-node/commit/520a8fa77a69ce5855dde3481f9bd39339cb7b83)) + + +### Chores + +* **internal:** add aliases for Record and Array ([#1443](https://github.com/openai/openai-node/issues/1443)) ([b65391b](https://github.com/openai/openai-node/commit/b65391ba10d5063035c3e5c0bcc5a48ffc80f41d)) + ## 4.91.1 (2025-04-01) Full Changelog: [v4.91.0...v4.91.1](https://github.com/openai/openai-node/compare/v4.91.0...v4.91.1) diff --git a/jsr.json b/jsr.json index 9bd85f8c9..e5bac64a6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.91.1", + "version": "4.92.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index cfa3e6201..2f79e9653 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.91.1", + "version": "4.92.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 85314d847..e2dac6b4c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.91.1'; // x-release-please-version +export const VERSION = '4.92.0'; // x-release-please-version From 
93569f39799512604db439af20f0ef0ad3dae295 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:51:53 +0000 Subject: [PATCH 203/246] chore(internal): only run examples workflow in main repo (#1450) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 627f5954f..6e59bb3fa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -68,6 +68,7 @@ jobs: examples: name: examples runs-on: ubuntu-latest + if: github.repository == 'openai/openai-node' steps: - uses: actions/checkout@v4 From 324b091e8d70c6a13e486ca87727d4bd59d9b71f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:52:33 +0000 Subject: [PATCH 204/246] release: 4.92.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e2b30744d..837894bfb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.92.0" + ".": "4.92.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 56fe95617..105627c5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.92.1 (2025-04-07) + +Full Changelog: [v4.92.0...v4.92.1](https://github.com/openai/openai-node/compare/v4.92.0...v4.92.1) + +### Chores + +* **internal:** only run examples workflow in main repo ([#1450](https://github.com/openai/openai-node/issues/1450)) ([5e49a7a](https://github.com/openai/openai-node/commit/5e49a7a447bb788fa05898c15ae57c6ea9c8fd49)) + ## 4.92.0 (2025-04-07) Full Changelog: [v4.91.1...v4.92.0](https://github.com/openai/openai-node/compare/v4.91.1...v4.92.0) diff --git a/jsr.json b/jsr.json index e5bac64a6..b986198a1 100644 --- 
a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.92.0", + "version": "4.92.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 2f79e9653..58c231fda 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.92.0", + "version": "4.92.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e2dac6b4c..bfae301de 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.92.0'; // x-release-please-version +export const VERSION = '4.92.1'; // x-release-please-version From 15a86c958bf300486907f2498e1028fc9bc50b00 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 14:25:49 +0000 Subject: [PATCH 205/246] chore(tests): improve enum examples (#1454) --- tests/api-resources/images.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 88eb97a93..43e67b030 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -28,7 +28,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -55,7 +55,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -78,7 +78,7 @@ describe('resource images', () => { n: 1, quality: 'standard', response_format: 'url', - size: '256x256', + size: '1024x1024', style: 'vivid', user: 'user-1234', }); From 33b66f517e756e63c676efee97f7122b3cf165d1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: 
Tue, 8 Apr 2025 18:49:53 +0000 Subject: [PATCH 206/246] feat(api): Add evalapi to sdk (#1456) Adding the evalsapi to the sdk. --- .stats.yml | 8 +- api.md | 72 ++ src/index.ts | 38 + src/resources/evals.ts | 3 + src/resources/evals/evals.ts | 783 ++++++++++++ src/resources/evals/index.ts | 33 + src/resources/evals/runs.ts | 3 + src/resources/evals/runs/index.ts | 23 + src/resources/evals/runs/output-items.ts | 410 +++++++ src/resources/evals/runs/runs.ts | 1058 +++++++++++++++++ src/resources/fine-tuning/checkpoints.ts | 3 + .../fine-tuning/checkpoints/checkpoints.ts | 32 + .../fine-tuning/checkpoints/index.ts | 12 + .../fine-tuning/checkpoints/permissions.ts | 198 +++ src/resources/fine-tuning/fine-tuning.ts | 6 + src/resources/fine-tuning/index.ts | 1 + src/resources/index.ts | 17 + tests/api-resources/evals/evals.test.ts | 417 +++++++ .../evals/runs/output-items.test.ts | 61 + tests/api-resources/evals/runs/runs.test.ts | 118 ++ .../checkpoints/permissions.test.ts | 85 ++ 21 files changed, 3377 insertions(+), 4 deletions(-) create mode 100644 src/resources/evals.ts create mode 100644 src/resources/evals/evals.ts create mode 100644 src/resources/evals/index.ts create mode 100644 src/resources/evals/runs.ts create mode 100644 src/resources/evals/runs/index.ts create mode 100644 src/resources/evals/runs/output-items.ts create mode 100644 src/resources/evals/runs/runs.ts create mode 100644 src/resources/fine-tuning/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/index.ts create mode 100644 src/resources/fine-tuning/checkpoints/permissions.ts create mode 100644 tests/api-resources/evals/evals.test.ts create mode 100644 tests/api-resources/evals/runs/output-items.test.ts create mode 100644 tests/api-resources/evals/runs/runs.test.ts create mode 100644 tests/api-resources/fine-tuning/checkpoints/permissions.test.ts diff --git a/.stats.yml b/.stats.yml index 
aebb90c8c..ebe07c137 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml -openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: bcd2cacdcb9fae9938f273cd167f613c +configured_endpoints: 97 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +config_hash: ef19d36c307306f14f2e1cd5c834a151 diff --git a/api.md b/api.md index cf464cf63..2eb54b34a 100644 --- a/api.md +++ b/api.md @@ -235,6 +235,22 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage +## Checkpoints + +### Permissions + +Types: + +- PermissionCreateResponse +- PermissionRetrieveResponse +- PermissionDeleteResponse + +Methods: + +- client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage +- client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse +- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint) -> PermissionDeleteResponse + # VectorStores Types: @@ -643,3 +659,59 @@ Types: Methods: - client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage + +# Evals + +Types: + +- EvalCustomDataSourceConfig +- EvalLabelModelGrader +- EvalStoredCompletionsDataSourceConfig +- EvalStringCheckGrader +- EvalTextSimilarityGrader +- EvalCreateResponse +- EvalRetrieveResponse +- EvalUpdateResponse +- EvalListResponse +- EvalDeleteResponse + +Methods: + +- client.evals.create({ ...params }) -> EvalCreateResponse +- client.evals.retrieve(evalId) -> EvalRetrieveResponse +- client.evals.update(evalId, { ...params }) -> 
EvalUpdateResponse +- client.evals.list({ ...params }) -> EvalListResponsesPage +- client.evals.del(evalId) -> EvalDeleteResponse + +## Runs + +Types: + +- CreateEvalCompletionsRunDataSource +- CreateEvalJSONLRunDataSource +- EvalAPIError +- RunCreateResponse +- RunRetrieveResponse +- RunListResponse +- RunDeleteResponse +- RunCancelResponse + +Methods: + +- client.evals.runs.create(evalId, { ...params }) -> RunCreateResponse +- client.evals.runs.retrieve(evalId, runId) -> RunRetrieveResponse +- client.evals.runs.list(evalId, { ...params }) -> RunListResponsesPage +- client.evals.runs.del(evalId, runId) -> RunDeleteResponse +- client.evals.runs.cancel(evalId, runId) -> RunCancelResponse + +### OutputItems + +Types: + +- OutputItemRetrieveResponse +- OutputItemListResponse + +Methods: + +- client.evals.runs.outputItems.retrieve(evalId, runId, outputItemId) -> OutputItemRetrieveResponse +- client.evals.runs.outputItems.list(evalId, runId, { ...params }) -> OutputItemListResponsesPage diff --git a/src/index.ts b/src/index.ts index 931894f2f..9e8d7ce37 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,23 @@ import { import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; import { Chat } from './resources/chat/chat'; +import { + EvalCreateParams, + EvalCreateResponse, + EvalCustomDataSourceConfig, + EvalDeleteResponse, + EvalLabelModelGrader, + EvalListParams, + EvalListResponse, + EvalListResponsesPage, + EvalRetrieveResponse, + EvalStoredCompletionsDataSourceConfig, + EvalStringCheckGrader, + EvalTextSimilarityGrader, + EvalUpdateParams, + EvalUpdateResponse, + Evals, +} from './resources/evals/evals'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; import { Responses } from './resources/responses/responses'; import { @@ -293,6 +310,7 @@ export class OpenAI extends Core.APIClient { batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); 
responses: API.Responses = new API.Responses(this); + evals: API.Evals = new API.Evals(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -356,6 +374,8 @@ OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; OpenAI.Responses = Responses; +OpenAI.Evals = Evals; +OpenAI.EvalListResponsesPage = EvalListResponsesPage; export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -508,6 +528,24 @@ export declare namespace OpenAI { export { Responses as Responses }; + export { + Evals as Evals, + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; diff --git a/src/resources/evals.ts b/src/resources/evals.ts new file mode 100644 index 000000000..b611710e1 --- /dev/null +++ b/src/resources/evals.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './evals/index'; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts new file mode 100644 index 000000000..84ff6d1bb --- /dev/null +++ b/src/resources/evals/evals.ts @@ -0,0 +1,783 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; +import * as RunsAPI from './runs/runs'; +import { + CreateEvalCompletionsRunDataSource, + CreateEvalJSONLRunDataSource, + EvalAPIError, + RunCancelResponse, + RunCreateParams, + RunCreateResponse, + RunDeleteResponse, + RunListParams, + RunListResponse, + RunListResponsesPage, + RunRetrieveResponse, + Runs, +} from './runs/runs'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class Evals extends APIResource { + runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); + + /** + * Create the structure of an evaluation that can be used to test a model's + * performance. An evaluation is a set of testing criteria and a datasource. After + * creating an evaluation, you can run it on different models and model parameters. + * We support several types of graders and datasources. For more information, see + * the [Evals guide](https://platform.openai.com/docs/guides/evals). + */ + create(body: EvalCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/evals', { body, ...options }); + } + + /** + * Get an evaluation by ID. + */ + retrieve(evalId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/evals/${evalId}`, options); + } + + /** + * Update certain properties of an evaluation. 
+ */ + update( + evalId: string, + body: EvalUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/evals/${evalId}`, { body, ...options }); + } + + /** + * List evaluations for a project. + */ + list( + query?: EvalListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: EvalListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/evals', EvalListResponsesPage, { query, ...options }); + } + + /** + * Delete an evaluation. + */ + del(evalId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/evals/${evalId}`, options); + } +} + +export class EvalListResponsesPage extends CursorPage {} + +/** + * A CustomDataSourceConfig which specifies the schema of your `item` and + * optionally `sample` namespaces. The response schema defines the shape of the + * data that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ +export interface EvalCustomDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; +} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface EvalLabelModelGrader { + input: Array; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. 
+ */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; +} + +export namespace EvalLabelModelGrader { + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface Assistant { + content: Assistant.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace Assistant { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StoredCompletionsDataSourceConfig which specifies the metadata property of + * your stored completions query. This is usually metadata like `usecase=chatbot` + * or `prompt-version=v2`, etc. The schema returned by this data source config is + * used to defined what variables are available in your evals. `item` and `sample` + * are both defined when using this data source config. + */ +export interface EvalStoredCompletionsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface EvalStringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface EvalTextSimilarityGrader { + /** + * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l' + | 'cosine'; + + /** + * The text being graded. + */ + input: string; + + /** + * A float score where a value greater than or equal indicates a passing grade. + */ + pass_threshold: number; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. + */ + type: 'text_similarity'; + + /** + * The name of the grader. + */ + name?: string; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. 
Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalCreateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalRetrieveResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalUpdateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. 
+ */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalListResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +export interface EvalDeleteResponse { + deleted: boolean; + + eval_id: string; + + object: string; +} + +export interface EvalCreateParams { + /** + * The configuration for the data source used for the evaluation runs. + */ + data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + + /** + * A list of graders for all eval runs in this group. + */ + testing_criteria: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name?: string; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai?: boolean; +} + +export namespace EvalCreateParams { + /** + * A CustomDataSourceConfig object that defines the schema for the data source used + * for the evaluation runs. This schema is used to define the shape of the data + * that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ + export interface Custom { + /** + * The json schema for the run data source items. + */ + item_schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; + + /** + * Whether to include the sample schema in the data source. + */ + include_sample_schema?: boolean; + } + + /** + * A data source config which specifies the metadata property of your stored + * completions query. This is usually metadata like `usecase=chatbot` or + * `prompt-version=v2`, etc. + */ + export interface StoredCompletions { + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. 
+ */ + export interface LabelModel { + input: Array; + + /** + * The labels to classify to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; + } + + export namespace LabelModel { + export interface SimpleInputMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } +} + +export interface EvalUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Rename the evaluation. + */ + name?: string; +} + +export interface EvalListParams extends CursorPageParams { + /** + * Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + * descending order. + */ + order?: 'asc' | 'desc'; + + /** + * Evals can be ordered by creation time or last updated time. Use `created_at` for + * creation time or `updated_at` for last updated time. + */ + order_by?: 'created_at' | 'updated_at'; +} + +Evals.EvalListResponsesPage = EvalListResponsesPage; +Evals.Runs = Runs; +Evals.RunListResponsesPage = RunListResponsesPage; + +export declare namespace Evals { + export { + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + + export { + Runs as Runs, + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as 
RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; +} diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts new file mode 100644 index 000000000..a246fe4e7 --- /dev/null +++ b/src/resources/evals/index.ts @@ -0,0 +1,33 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals'; +export { + RunListResponsesPage, + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunListParams, +} from './runs/index'; diff --git a/src/resources/evals/runs.ts b/src/resources/evals/runs.ts new file mode 100644 index 000000000..a3cc2bc7f --- /dev/null +++ b/src/resources/evals/runs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './runs/index'; diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts new file mode 100644 index 000000000..d0e18bff4 --- /dev/null +++ b/src/resources/evals/runs/index.ts @@ -0,0 +1,23 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + OutputItemListResponsesPage, + OutputItems, + type OutputItemRetrieveResponse, + type OutputItemListResponse, + type OutputItemListParams, +} from './output-items'; +export { + RunListResponsesPage, + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunListParams, +} from './runs'; diff --git a/src/resources/evals/runs/output-items.ts b/src/resources/evals/runs/output-items.ts new file mode 100644 index 000000000..ee947c60f --- /dev/null +++ b/src/resources/evals/runs/output-items.ts @@ -0,0 +1,410 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as RunsAPI from './runs'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; + +export class OutputItems extends APIResource { + /** + * Get an evaluation run output item by ID. + */ + retrieve( + evalId: string, + runId: string, + outputItemId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/evals/${evalId}/runs/${runId}/output_items/${outputItemId}`, options); + } + + /** + * Get a list of output items for an evaluation run. 
+ */ + list( + evalId: string, + runId: string, + query?: OutputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + runId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + runId: string, + query: OutputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(evalId, runId, {}, query); + } + return this._client.getAPIList( + `/evals/${evalId}/runs/${runId}/output_items`, + OutputItemListResponsesPage, + { query, ...options }, + ); + } +} + +export class OutputItemListResponsesPage extends CursorPage {} + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemRetrieveResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. + */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemRetrieveResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemRetrieveResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. 
+ */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. + */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. + */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemListResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. 
+ */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemListResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemListResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. + */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. + */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. 
+ */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +export interface OutputItemListParams extends CursorPageParams { + /** + * Sort order for output items by timestamp. Use `asc` for ascending order or + * `desc` for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter output items by status. Use `failed` to filter by failed output items or + * `pass` to filter by passed output items. + */ + status?: 'fail' | 'pass'; +} + +OutputItems.OutputItemListResponsesPage = OutputItemListResponsesPage; + +export declare namespace OutputItems { + export { + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts new file mode 100644 index 000000000..ca2b7f424 --- /dev/null +++ b/src/resources/evals/runs/runs.ts @@ -0,0 +1,1058 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as Shared from '../../shared'; +import * as OutputItemsAPI from './output-items'; +import { + OutputItemListParams, + OutputItemListResponse, + OutputItemListResponsesPage, + OutputItemRetrieveResponse, + OutputItems, +} from './output-items'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; + +export class Runs extends APIResource { + outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client); + + /** + * Create a new evaluation run. This is the endpoint that will kick off grading. + */ + create( + evalId: string, + body: RunCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/evals/${evalId}/runs`, { body, ...options }); + } + + /** + * Get an evaluation run by ID. + */ + retrieve( + evalId: string, + runId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/evals/${evalId}/runs/${runId}`, options); + } + + /** + * Get a list of runs for an evaluation. + */ + list( + evalId: string, + query?: RunListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + query: RunListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(evalId, {}, query); + } + return this._client.getAPIList(`/evals/${evalId}/runs`, RunListResponsesPage, { query, ...options }); + } + + /** + * Delete an eval run. + */ + del(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/evals/${evalId}/runs/${runId}`, options); + } + + /** + * Cancel an ongoing evaluation run. 
+ */ + cancel(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/evals/${evalId}/runs/${runId}`, options); + } +} + +export class RunListResponsesPage extends CursorPage {} + +/** + * A CompletionsRunDataSource object describing a model sampling configuration. + */ +export interface CreateEvalCompletionsRunDataSource { + input_messages: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model: string; + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + source: + | CreateEvalCompletionsRunDataSource.FileContent + | CreateEvalCompletionsRunDataSource.FileID + | CreateEvalCompletionsRunDataSource.StoredCompletions; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; +} + +export namespace CreateEvalCompletionsRunDataSource { + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. 
+ */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + export interface StoredCompletions { + /** + * An optional Unix timestamp to filter items created after this time. + */ + created_after: number | null; + + /** + * An optional Unix timestamp to filter items created before this time. + */ + created_before: number | null; + + /** + * An optional maximum number of items to return. + */ + limit: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * An optional model to filter by (e.g., 'gpt-4o'). + */ + model: string | null; + + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } +} + +/** + * A JsonlRunDataSource object with that specifies a JSONL file that matches the + * eval + */ +export interface CreateEvalJSONLRunDataSource { + source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; + + /** + * The type of data source. Always `jsonl`. + */ + type: 'jsonl'; +} + +export namespace CreateEvalJSONLRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } +} + +/** + * An object representing an error response from the Eval API. + */ +export interface EvalAPIError { + /** + * The error code. + */ + code: string; + + /** + * The error message. + */ + message: string; +} + +/** + * A schema representing an evaluation run. 
+ */ +export interface RunCreateResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCreateResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCreateResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. 
+ */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunRetrieveResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
+ */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunRetrieveResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunRetrieveResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. 
+ */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunListResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunListResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. 
+ */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. 
+ */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunCreateParams { + /** + * Details about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the run. + */ + name?: string; +} + +export interface RunListParams extends CursorPageParams { + /** + * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + * descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | + * "canceled". 
+ */ + status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; +} + +Runs.RunListResponsesPage = RunListResponsesPage; +Runs.OutputItems = OutputItems; +Runs.OutputItemListResponsesPage = OutputItemListResponsesPage; + +export declare namespace Runs { + export { + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; + + export { + OutputItems as OutputItems, + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints.ts b/src/resources/fine-tuning/checkpoints.ts new file mode 100644 index 000000000..eb09063f6 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './checkpoints/index'; diff --git a/src/resources/fine-tuning/checkpoints/checkpoints.ts b/src/resources/fine-tuning/checkpoints/checkpoints.ts new file mode 100644 index 000000000..08422aa64 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/checkpoints.ts @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as PermissionsAPI from './permissions'; +import { + PermissionCreateParams, + PermissionCreateResponse, + PermissionCreateResponsesPage, + PermissionDeleteResponse, + PermissionRetrieveParams, + PermissionRetrieveResponse, + Permissions, +} from './permissions'; + +export class Checkpoints extends APIResource { + permissions: PermissionsAPI.Permissions = new PermissionsAPI.Permissions(this._client); +} + +Checkpoints.Permissions = Permissions; +Checkpoints.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Checkpoints { + export { + Permissions as Permissions, + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints/index.ts b/src/resources/fine-tuning/checkpoints/index.ts new file mode 100644 index 000000000..51d1af9cf --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Checkpoints } from './checkpoints'; +export { + PermissionCreateResponsesPage, + Permissions, + type PermissionCreateResponse, + type PermissionRetrieveResponse, + type PermissionDeleteResponse, + type PermissionCreateParams, + type PermissionRetrieveParams, +} from './permissions'; diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts new file mode 100644 index 000000000..500c3de81 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -0,0 +1,198 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import { Page } from '../../../pagination'; + +export class Permissions extends APIResource { + /** + * **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + * + * This enables organization owners to share fine-tuned models with other projects + * in their organization. + */ + create( + fineTunedModelCheckpoint: string, + body: PermissionCreateParams, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + PermissionCreateResponsesPage, + { body, method: 'post', ...options }, + ); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to view all permissions for a + * fine-tuned model checkpoint. + */ + retrieve( + fineTunedModelCheckpoint: string, + query?: PermissionRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + fineTunedModelCheckpoint: string, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + fineTunedModelCheckpoint: string, + query: PermissionRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(fineTunedModelCheckpoint, {}, query); + } + return this._client.get(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, { + query, + ...options, + }); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to delete a permission for a + * fine-tuned model checkpoint. 
+ */ + del( + fineTunedModelCheckpoint: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.delete(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, options); + } +} + +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class PermissionCreateResponsesPage extends Page {} + +/** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ +export interface PermissionCreateResponse { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; +} + +export interface PermissionRetrieveResponse { + data: Array; + + has_more: boolean; + + object: 'list'; + + first_id?: string | null; + + last_id?: string | null; +} + +export namespace PermissionRetrieveResponse { + /** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ + export interface Data { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; + } +} + +export interface PermissionDeleteResponse { + /** + * The ID of the fine-tuned model checkpoint permission that was deleted. + */ + id: string; + + /** + * Whether the fine-tuned model checkpoint permission was successfully deleted. 
+ */ + deleted: boolean; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; +} + +export interface PermissionCreateParams { + /** + * The project identifiers to grant access to. + */ + project_ids: Array; +} + +export interface PermissionRetrieveParams { + /** + * Identifier for the last permission ID from the previous pagination request. + */ + after?: string; + + /** + * Number of permissions to retrieve. + */ + limit?: number; + + /** + * The order in which to retrieve permissions. + */ + order?: 'ascending' | 'descending'; + + /** + * The ID of the project to get permissions for. + */ + project_id?: string; +} + +Permissions.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Permissions { + export { + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index df013c8ec..9b0a01992 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as CheckpointsAPI from './checkpoints/checkpoints'; +import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; import { FineTuningJob, @@ -18,11 +20,13 @@ import { export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); + checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); } FineTuning.Jobs = Jobs; FineTuning.FineTuningJobsPage = FineTuningJobsPage; FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; +FineTuning.Checkpoints = Checkpoints; export declare namespace FineTuning { export { @@ -38,4 +42,6 @@ export declare namespace FineTuning { type JobListParams as JobListParams, type JobListEventsParams as JobListEventsParams, }; + + export { Checkpoints as Checkpoints }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 4954406b8..d23161c62 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,5 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { FineTuningJobsPage, diff --git a/src/resources/index.ts b/src/resources/index.ts index 04c2c887b..0d8ec9220 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -29,6 +29,23 @@ export { type EmbeddingModel, type EmbeddingCreateParams, } from './embeddings'; +export { + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals/evals'; export { FileObjectsPage, Files, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts new file mode 100644 index 000000000..3aeb3e15c --- /dev/null +++ b/tests/api-resources/evals/evals.test.ts @@ -0,0 +1,417 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource evals', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + 
'118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 
'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + '118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + include_sample_schema: true, + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + metadata: { foo: 'string' }, + name: 'name', + share_with_openai: true, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.retrieve('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.retrieve('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('update', async () => { + const responsePromise = client.evals.update('eval_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = client.evals.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.list( + { after: 'after', limit: 0, order: 'asc', order_by: 'created_at' }, + { path: '/_stainless_unknown_path' }, + ), 
+ ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.evals.del('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.del('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); +}); diff --git a/tests/api-resources/evals/runs/output-items.test.ts b/tests/api-resources/evals/runs/output-items.test.ts new file mode 100644 index 000000000..ff075b404 --- /dev/null +++ b/tests/api-resources/evals/runs/output-items.test.ts @@ -0,0 +1,61 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource outputItems', () => { + test('retrieve', async () => { + const responsePromise = client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.outputItems.list('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.list('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an 
invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.list( + 'eval_id', + 'run_id', + { after: 'after', limit: 0, order: 'asc', status: 'fail' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/evals/runs/runs.test.ts b/tests/api-resources/evals/runs/runs.test.ts new file mode 100644 index 000000000..786df0ba1 --- /dev/null +++ b/tests/api-resources/evals/runs/runs.test.ts @@ -0,0 +1,118 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource runs', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.runs.create('eval_id', { + data_source: { source: { content: [{ item: { foo: 'bar' } }], type: 'file_content' }, type: 'jsonl' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.runs.create('eval_id', { + data_source: { + source: { content: [{ item: { foo: 'bar' }, sample: { foo: 'bar' } }], type: 'file_content' }, + type: 'jsonl', + }, + metadata: { foo: 'string' }, + name: 'name', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.runs.retrieve('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const 
response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.retrieve('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.list('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.runs.list('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.list( + 'eval_id', + { after: 'after', limit: 0, order: 'asc', status: 'queued' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.evals.runs.del('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + 
expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.del('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = client.evals.runs.cancel('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.cancel('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts new file mode 100644 index 000000000..cb8c7a9a1 --- /dev/null +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -0,0 +1,85 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource permissions', () => { + test('create: only required params', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + }); + + test('retrieve', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + 
test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve( + 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + { after: 'after', limit: 0, order: 'ascending', project_id: 'project_id' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); From a100f0a0e1d336f8a78c8bbd9e3703cda3f0c5d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:44:39 +0000 Subject: [PATCH 207/246] chore(internal): fix examples (#1457) --- .stats.yml | 4 +- .../beta/threads/runs/runs.test.ts | 2 +- .../beta/threads/threads.test.ts | 2 +- tests/api-resources/evals/evals.test.ts | 293 +----------------- tests/api-resources/images.test.ts | 6 +- tests/api-resources/moderations.test.ts | 5 +- 6 files changed, 10 insertions(+), 302 deletions(-) diff --git 
a/.stats.yml b/.stats.yml index ebe07c137..4a82ee242 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: ef19d36c307306f14f2e1cd5c834a151 +config_hash: d6c61213488683418adb860a9ee1501b diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 13ae89a00..4b2b8030b 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -37,7 +37,7 @@ describe('resource runs', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, reasoning_effort: 'low', response_format: 'auto', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index f26d6ec44..bc92a0c8a 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -121,7 +121,7 @@ describe('resource threads', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, response_format: 'auto', stream: false, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index 3aeb3e15c..fabc2602a 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -11,151 +11,7 @@ const client = new OpenAI({ describe('resource evals', () => { test('create: only required params', async () => { const responsePromise = 
client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - '83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - '96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 
'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom' }, testing_criteria: [ { input: [{ content: 'content', role: 'role' }], @@ -178,152 +34,7 @@ describe('resource evals', () => { test('create: required and optional params', async () => { const response = await client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - '83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - 
'96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - include_sample_schema: true, - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom', include_sample_schema: true }, testing_criteria: [ { input: [{ content: 'content', role: 'role' }], diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 43e67b030..4f15e20ac 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -25,7 +25,7 @@ describe('resource images', () => { test('createVariation: required and optional params', async () => { const response = await client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -52,7 +52,7 @@ describe('resource images', () => { image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -74,7 +74,7 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', - model: 
'dall-e-3', + model: 'string', n: 1, quality: 'standard', response_format: 'url', diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index 64f9acf3c..107ce9974 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -21,9 +21,6 @@ describe('resource moderations', () => { }); test('create: required and optional params', async () => { - const response = await client.moderations.create({ - input: 'I want to kill them.', - model: 'omni-moderation-2024-09-26', - }); + const response = await client.moderations.create({ input: 'I want to kill them.', model: 'string' }); }); }); From 58f4559d952f6e56a8f27a6bcaba0acf295623df Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:46:32 +0000 Subject: [PATCH 208/246] chore(internal): skip broken test (#1458) --- .stats.yml | 2 +- .../fine-tuning/checkpoints/permissions.test.ts | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 4a82ee242..c39ce1186 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: d6c61213488683418adb860a9ee1501b +config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index cb8c7a9a1..e7aceae3e 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -61,7 +61,8 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - test('del', async () => { + // OpenAPI spec is slightly incorrect 
+ test.skip('del', async () => { const responsePromise = client.fineTuning.checkpoints.permissions.del( 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', ); @@ -74,7 +75,8 @@ describe('resource permissions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('del: request options instead of params are passed correctly', async () => { + // OpenAPI spec is slightly incorrect + test.skip('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { From e9ca1a07691976f41492e3652e1cccea33a9b70b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:47:04 +0000 Subject: [PATCH 209/246] release: 4.93.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 837894bfb..bc3f36214 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.92.1" + ".": "4.93.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 105627c5b..e6a402af8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.93.0 (2025-04-08) + +Full Changelog: [v4.92.1...v4.93.0](https://github.com/openai/openai-node/compare/v4.92.1...v4.93.0) + +### Features + +* **api:** Add evalapi to sdk ([#1456](https://github.com/openai/openai-node/issues/1456)) ([ee917e3](https://github.com/openai/openai-node/commit/ee917e3335fcf44e87a28e54ce8ddfdcdfab1652)) + + +### Chores + +* **internal:** fix examples ([#1457](https://github.com/openai/openai-node/issues/1457)) 
([a3dd0dd](https://github.com/openai/openai-node/commit/a3dd0dde3e8ad9cc7a02cf203d4550f91d31a2ae)) +* **internal:** skip broken test ([#1458](https://github.com/openai/openai-node/issues/1458)) ([4d2f815](https://github.com/openai/openai-node/commit/4d2f815ba5f6c426f9c21f4c3db443166389bbf8)) +* **tests:** improve enum examples ([#1454](https://github.com/openai/openai-node/issues/1454)) ([ecabce2](https://github.com/openai/openai-node/commit/ecabce282a9fb60122310942f3b647dfefae5403)) + ## 4.92.1 (2025-04-07) Full Changelog: [v4.92.0...v4.92.1](https://github.com/openai/openai-node/compare/v4.92.0...v4.92.1) diff --git a/jsr.json b/jsr.json index b986198a1..b5e49671a 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.92.1", + "version": "4.93.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 58c231fda..b9316cbe3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.92.1", + "version": "4.93.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index bfae301de..c385afc4c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.92.1'; // x-release-please-version +export const VERSION = '4.93.0'; // x-release-please-version From 554c3b142024bec8010474cd7e42b99a209d4daa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 20:05:16 +0000 Subject: [PATCH 210/246] feat(api): manual updates --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index c39ce1186..d4a4370a7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 +config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 From d6b396b94d9ccf64ddfe945069012b6162225fa9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 09:26:35 +0000 Subject: [PATCH 211/246] chore: workaround build errors --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d4a4370a7..9d8d07c6a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 +config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea From 2d452879000c07f3ef4e775e19a527f5f6fa7b4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 16:43:34 +0000 Subject: [PATCH 212/246] chore(internal): upload builds and expand CI branch coverage (#1460) --- .github/workflows/ci.yml | 37 +++++++++++++++++++++----------- scripts/utils/upload-artifact.sh | 25 +++++++++++++++++++++ 2 files changed, 50 insertions(+), 12 deletions(-) create mode 100755 scripts/utils/upload-artifact.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e59bb3fa..bd57cd3e6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,20 +1,18 @@ name: CI on: push: - branches: - - master - - update-specs - pull_request: - 
branches: - - master - - next + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'preview-head/**' + - 'preview-base/**' + - 'preview/**' jobs: lint: name: lint runs-on: ubuntu-latest - - steps: - uses: actions/checkout@v4 @@ -32,8 +30,9 @@ jobs: build: name: build runs-on: ubuntu-latest - - + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 @@ -47,10 +46,24 @@ jobs: - name: Check build run: ./scripts/build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/openai-node' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/openai-node' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh test: name: test runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 000000000..0e8490199 --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-node/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi From 
77fc77f7d05d03eafe6c8f002044c60c4bab3c64 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:08:51 +0000 Subject: [PATCH 213/246] chore(internal): reduce CI branch coverage --- .github/workflows/ci.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd57cd3e6..2ed1eead8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,13 +1,12 @@ name: CI on: push: - branches-ignore: - - 'generated' - - 'codegen/**' - - 'integrated/**' - - 'preview-head/**' - - 'preview-base/**' - - 'preview/**' + branches: + - master + pull_request: + branches: + - master + - next jobs: lint: From 6558b7ca8aef2f98f47a07bc206eb4a789097510 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 14:30:54 +0000 Subject: [PATCH 214/246] chore(client): minor internal fixes --- src/core.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core.ts b/src/core.ts index ccc677e0e..cfd4eeaa6 100644 --- a/src/core.ts +++ b/src/core.ts @@ -331,10 +331,10 @@ export abstract class APIClient { } buildRequest( - options: FinalRequestOptions, + inputOptions: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: RequestInit; url: string; timeout: number } { - options = { ...options }; + const options = { ...inputOptions }; const { method, path, query, headers: headers = {} } = options; const body = @@ -362,8 +362,8 @@ export abstract class APIClient { } if (this.idempotencyHeader && method !== 'get') { - if (!options.idempotencyKey) options.idempotencyKey = this.defaultIdempotencyKey(); - headers[this.idempotencyHeader] = options.idempotencyKey; + if (!inputOptions.idempotencyKey) inputOptions.idempotencyKey = this.defaultIdempotencyKey(); + headers[this.idempotencyHeader] = 
inputOptions.idempotencyKey; } const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount }); From 840e7de7870835488d4c823d97afdf5d53a739be Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:40:57 +0000 Subject: [PATCH 215/246] feat(api): adding gpt-4.1 family of model IDs --- .stats.yml | 4 ++-- src/resources/beta/assistants.ts | 6 ++++++ src/resources/shared.ts | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9d8d07c6a..b40485bd0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml -openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml +openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0668dcf54..bf957db95 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1337,6 +1337,12 @@ export interface AssistantUpdateParams { */ model?: | (string & {}) + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 3e8ded763..94ef50585 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -9,6 +9,12 @@ export type AllModels = | 'computer-use-preview-2025-03-11'; export type ChatModel = + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 
'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' From bd501650af112dd69a2b220beadbb30f42cd9f77 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:41:30 +0000 Subject: [PATCH 216/246] release: 4.94.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 17 +++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 21 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bc3f36214..12b27aa8e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.93.0" + ".": "4.94.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e6a402af8..d4e7613fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## 4.94.0 (2025-04-14) + +Full Changelog: [v4.93.0...v4.94.0](https://github.com/openai/openai-node/compare/v4.93.0...v4.94.0) + +### Features + +* **api:** adding gpt-4.1 family of model IDs ([bddcbcf](https://github.com/openai/openai-node/commit/bddcbcffdc409ffc8a078a65bbd302cd50b35ff0)) +* **api:** manual updates ([7532f48](https://github.com/openai/openai-node/commit/7532f48ad25c5125064a59985587c20c47a2cbfb)) + + +### Chores + +* **client:** minor internal fixes ([d342f17](https://github.com/openai/openai-node/commit/d342f17e2642da5ee83d080b410dc3c4fe153814)) +* **internal:** reduce CI branch coverage ([a49b94a](https://github.com/openai/openai-node/commit/a49b94a9aebd3e30e1802fff633e1b46cfb81942)) +* **internal:** upload builds and expand CI branch coverage ([#1460](https://github.com/openai/openai-node/issues/1460)) ([7e23bb4](https://github.com/openai/openai-node/commit/7e23bb4f4a09303195b612cc5b393cc41c1d855b)) +* workaround build errors ([913eba8](https://github.com/openai/openai-node/commit/913eba828d116f49fa78b219c62274c1e95c6f17)) + ## 4.93.0 (2025-04-08) Full Changelog: 
[v4.92.1...v4.93.0](https://github.com/openai/openai-node/compare/v4.92.1...v4.93.0) diff --git a/jsr.json b/jsr.json index b5e49671a..891e18dcb 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.93.0", + "version": "4.94.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index b9316cbe3..a399b6cf6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.93.0", + "version": "4.94.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c385afc4c..4a35de04b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.93.0'; // x-release-please-version +export const VERSION = '4.94.0'; // x-release-please-version From a0d000094f69db82974de4ba792cd07d4ab59c21 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:42:31 +0000 Subject: [PATCH 217/246] feat(api): add o3 and o4-mini model IDs --- .stats.yml | 6 +- src/resources/chat/completions/completions.ts | 55 ++++++++++++++++--- src/resources/completions.ts | 2 + src/resources/responses/responses.ts | 46 +++++++++++++++- src/resources/shared.ts | 19 +++++-- .../api-resources/responses/responses.test.ts | 3 +- 6 files changed, 112 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index b40485bd0..848c5b5ad 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml -openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a -config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml +openapi_spec_hash: c855121b2b2324b99499c9244c21d24d +config_hash: d20837393b73efdb19cd08e04c1cc9a1 diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index f0ef1d0cc..17edac02c 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -140,9 +140,25 @@ export interface ChatCompletion { object: 'chat.completion'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -319,11 +335,11 @@ export interface ChatCompletionAudioParam { * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, * or `pcm16`. 
*/ - format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; + format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** * The voice the model uses to respond. Supported voices are `alloy`, `ash`, - * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + * `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. */ voice: | (string & {}) @@ -375,9 +391,25 @@ export interface ChatCompletionChunk { object: 'chat.completion.chunk'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -1114,7 +1146,7 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. 
Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -1194,7 +1226,7 @@ export interface ChatCompletionCreateParamsBase { * * This value is now deprecated in favor of `max_completion_tokens`, and is not * compatible with - * [o1 series models](https://platform.openai.com/docs/guides/reasoning). + * [o-series models](https://platform.openai.com/docs/guides/reasoning). */ max_tokens?: number | null; @@ -1296,14 +1328,19 @@ export interface ChatCompletionCreateParamsBase { * latency guarentee. * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` * utilized. */ - service_tier?: 'auto' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. */ diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 664e39d9d..5cbec5e3c 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -293,6 +293,8 @@ export interface CompletionCreateParamsBase { seed?: number | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. 
*/ diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index a46c4182c..52dd079fc 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -321,7 +321,7 @@ export interface Response { metadata: Shared.Metadata | null; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -414,6 +414,27 @@ export interface Response { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * The status of the response generation. One of `completed`, `failed`, * `in_progress`, or `incomplete`. 
@@ -2673,7 +2694,7 @@ export interface ResponseCreateParamsBase { input: string | ResponseInput; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -2740,6 +2761,27 @@ export interface ResponseCreateParamsBase { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * Whether to store the generated model response for later retrieval via API. 
*/ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 94ef50585..1c0006b18 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -15,6 +15,10 @@ export type ChatModel = | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano-2025-04-14' + | 'o4-mini' + | 'o4-mini-2025-04-16' + | 'o3' + | 'o3-2025-04-16' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' @@ -187,13 +191,20 @@ export interface Reasoning { effort?: ReasoningEffort | null; /** - * **computer_use_preview only** + * @deprecated **Deprecated:** use `summary` instead. * * A summary of the reasoning performed by the model. This can be useful for - * debugging and understanding the model's reasoning process. One of `concise` or - * `detailed`. + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. */ - generate_summary?: 'concise' | 'detailed' | null; + generate_summary?: 'auto' | 'concise' | 'detailed' | null; + + /** + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. 
+ */ + summary?: 'auto' | 'concise' | 'detailed' | null; } /** diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index e10722738..cf7e9cf3c 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -30,7 +30,8 @@ describe('resource responses', () => { metadata: { foo: 'string' }, parallel_tool_calls: true, previous_response_id: 'previous_response_id', - reasoning: { effort: 'low', generate_summary: 'concise' }, + reasoning: { effort: 'low', generate_summary: 'auto', summary: 'auto' }, + service_tier: 'auto', store: true, stream: false, temperature: 1, From b5a5ee43837778adae925f6abcbc395b30ed826b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:43:04 +0000 Subject: [PATCH 218/246] release: 4.95.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 12b27aa8e..077a9fd7a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.94.0" + ".": "4.95.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d4e7613fd..3f5f1ef26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.95.0 (2025-04-16) + +Full Changelog: [v4.94.0...v4.95.0](https://github.com/openai/openai-node/compare/v4.94.0...v4.95.0) + +### Features + +* **api:** add o3 and o4-mini model IDs ([4845cd9](https://github.com/openai/openai-node/commit/4845cd9ac17450022f1632ae01397e41a97f1662)) + ## 4.94.0 (2025-04-14) Full Changelog: [v4.93.0...v4.94.0](https://github.com/openai/openai-node/compare/v4.93.0...v4.94.0) diff --git a/jsr.json b/jsr.json index 891e18dcb..d6cacf5f9 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": 
"@openai/openai", - "version": "4.94.0", + "version": "4.95.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index a399b6cf6..512e2ee55 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.94.0", + "version": "4.95.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4a35de04b..5f581d42d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.94.0'; // x-release-please-version +export const VERSION = '4.95.0'; // x-release-please-version From aea2d123d200e6a7eae11e66583127270a8db8bf Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 18 Apr 2025 16:49:09 +0100 Subject: [PATCH 219/246] fix(zod): warn on optional field usage (#1469) --- .../zod-to-json-schema/parsers/object.ts | 12 ++++- tests/helpers/zod.test.ts | 52 +++++++++++++++++++ 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/parsers/object.ts b/src/_vendor/zod-to-json-schema/parsers/object.ts index f2120c8fe..25e5db116 100644 --- a/src/_vendor/zod-to-json-schema/parsers/object.ts +++ b/src/_vendor/zod-to-json-schema/parsers/object.ts @@ -39,12 +39,20 @@ export function parseObjectDef(def: ZodObjectDef, refs: Refs) { [propName, propDef], ) => { if (propDef === undefined || propDef._def === undefined) return acc; + const propertyPath = [...refs.currentPath, 'properties', propName]; const parsedDef = parseDef(propDef._def, { ...refs, - currentPath: [...refs.currentPath, 'properties', propName], - propertyPath: [...refs.currentPath, 'properties', propName], + currentPath: propertyPath, + propertyPath, }); if (parsedDef === undefined) return acc; + if (refs.openaiStrictMode && propDef.isOptional() && !propDef.isNullable()) { + console.warn( + `Zod field at \`${propertyPath.join( + '/', + )}\` uses 
\`.optional()\` without \`.nullable()\` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.`, + ); + } return { properties: { ...acc.properties, diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts index 493b4c0c8..02d8a7a8f 100644 --- a/tests/helpers/zod.test.ts +++ b/tests/helpers/zod.test.ts @@ -278,4 +278,56 @@ describe('zodResponseFormat', () => { } `); }); + + it('warns on optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + required: z.string(), + optional: z.string().optional(), + optional_and_nullable: z.string().optional().nullable(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Zod field at `#/definitions/schema/properties/optional` uses `.optional()` without `.nullable()` which is not supported by the API. 
See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.', + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('warns on nested optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + foo: z.object({ bar: z.array(z.object({ can_be_missing: z.boolean().optional() })) }), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'Zod field at `#/definitions/schema/properties/foo/properties/bar/items/properties/can_be_missing` uses `.optional()`', + ), + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('does not warn on union nullable fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + union: z.union([z.string(), z.null()]).optional(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledTimes(0); + }); }); From 2785c1186b528e4ab3a2a7c9282e041aaa4c13f6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Apr 2025 15:50:02 +0000 Subject: [PATCH 220/246] release: 4.95.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 077a9fd7a..2f61d58b0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.95.0" + ".": "4.95.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f5f1ef26..1f864e203 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.95.1 (2025-04-18) + +Full Changelog: [v4.95.0...v4.95.1](https://github.com/openai/openai-node/compare/v4.95.0...v4.95.1) + +### Bug Fixes + +* 
**zod:** warn on optional field usage ([#1469](https://github.com/openai/openai-node/issues/1469)) ([aea2d12](https://github.com/openai/openai-node/commit/aea2d123d200e6a7eae11e66583127270a8db8bf)) + ## 4.95.0 (2025-04-16) Full Changelog: [v4.94.0...v4.95.0](https://github.com/openai/openai-node/compare/v4.94.0...v4.95.0) diff --git a/jsr.json b/jsr.json index d6cacf5f9..8271c8522 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.95.0", + "version": "4.95.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 512e2ee55..76fe7d4d0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.95.0", + "version": "4.95.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 5f581d42d..cd1995322 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.95.0'; // x-release-please-version +export const VERSION = '4.95.1'; // x-release-please-version From bc492ba124cddd545eec7a1199712452c573a7a4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 18 Apr 2025 17:56:50 +0100 Subject: [PATCH 221/246] fix(types): export AssistantStream (#1472) --- src/resources/beta/assistants.ts | 3 +++ src/resources/beta/threads/threads.ts | 2 ++ 2 files changed, 5 insertions(+) diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index bf957db95..00a6ff2cf 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -9,6 +9,7 @@ import * as ThreadsAPI from './threads/threads'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; +import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends 
APIResource { /** @@ -1517,4 +1518,6 @@ export declare namespace Assistants { type AssistantUpdateParams as AssistantUpdateParams, type AssistantListParams as AssistantListParams, }; + + export { AssistantStream }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 8075ba0ac..1e0077a3f 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1718,4 +1718,6 @@ export declare namespace Threads { type MessageUpdateParams as MessageUpdateParams, type MessageListParams as MessageListParams, }; + + export { AssistantStream }; } From 939f6365c304c037e0473207d85bbc2f2731b105 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Apr 2025 20:12:57 +0000 Subject: [PATCH 222/246] chore(ci): add timeout thresholds for CI jobs --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ed1eead8..b0aac41b2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,6 +10,7 @@ on: jobs: lint: + timeout-minutes: 10 name: lint runs-on: ubuntu-latest steps: @@ -27,6 +28,7 @@ jobs: run: ./scripts/lint build: + timeout-minutes: 5 name: build runs-on: ubuntu-latest permissions: @@ -61,6 +63,7 @@ jobs: SHA: ${{ github.sha }} run: ./scripts/utils/upload-artifact.sh test: + timeout-minutes: 10 name: test runs-on: ubuntu-latest steps: @@ -78,6 +81,7 @@ jobs: run: ./scripts/test examples: + timeout-minutes: 10 name: examples runs-on: ubuntu-latest if: github.repository == 'openai/openai-node' From c353531a238863e7f386a66dcee9f02b8115dd47 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 16:30:01 +0000 Subject: [PATCH 223/246] feat(api): adding new image model support --- .stats.yml | 6 +- api.md | 6 +- src/resources/beta/realtime/realtime.ts | 98 +- 
src/resources/beta/threads/threads.ts | 4 +- src/resources/evals/evals.ts | 749 +++++++- src/resources/evals/runs/runs.ts | 1699 ++++++++++++++--- .../fine-tuning/checkpoints/permissions.ts | 6 +- src/resources/images.ts | 202 +- src/resources/responses/responses.ts | 162 ++ tests/api-resources/evals/evals.test.ts | 1 - .../checkpoints/permissions.test.ts | 15 +- tests/api-resources/images.test.ts | 7 +- 12 files changed, 2534 insertions(+), 421 deletions(-) diff --git a/.stats.yml b/.stats.yml index 848c5b5ad..d92408173 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml -openapi_spec_hash: c855121b2b2324b99499c9244c21d24d -config_hash: d20837393b73efdb19cd08e04c1cc9a1 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml +openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 +config_hash: b597cd9a31e9e5ec709e2eefb4c54122 diff --git a/api.md b/api.md index 2eb54b34a..49e6548a8 100644 --- a/api.md +++ b/api.md @@ -249,7 +249,7 @@ Methods: - client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage - client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse -- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint) -> PermissionDeleteResponse +- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint, permissionId) -> PermissionDeleteResponse # VectorStores @@ -626,6 +626,10 @@ Types: - ResponseOutputRefusal - ResponseOutputText - ResponseReasoningItem +- ResponseReasoningSummaryPartAddedEvent +- ResponseReasoningSummaryPartDoneEvent +- ResponseReasoningSummaryTextDeltaEvent +- ResponseReasoningSummaryTextDoneEvent 
- ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 1c02fdd1a..5012b1edd 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -915,12 +915,34 @@ export type RealtimeClientEvent = | ConversationItemTruncateEvent | InputAudioBufferAppendEvent | InputAudioBufferClearEvent + | RealtimeClientEvent.OutputAudioBufferClear | InputAudioBufferCommitEvent | ResponseCancelEvent | ResponseCreateEvent | SessionUpdateEvent | TranscriptionSessionUpdate; +export namespace RealtimeClientEvent { + /** + * **WebRTC Only:** Emit to cut off the current audio response. This will trigger + * the server to stop generating audio and emit a `output_audio_buffer.cleared` + * event. This event should be preceded by a `response.cancel` client event to stop + * the generation of the current response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferClear { + /** + * The event type, must be `output_audio_buffer.clear`. + */ + type: 'output_audio_buffer.clear'; + + /** + * The unique ID of the client event used for error handling. + */ + event_id?: string; + } +} + /** * The response resource. */ @@ -1174,7 +1196,10 @@ export type RealtimeServerEvent = | ResponseTextDoneEvent | SessionCreatedEvent | SessionUpdatedEvent - | TranscriptionSessionUpdatedEvent; + | TranscriptionSessionUpdatedEvent + | RealtimeServerEvent.OutputAudioBufferStarted + | RealtimeServerEvent.OutputAudioBufferStopped + | RealtimeServerEvent.OutputAudioBufferCleared; export namespace RealtimeServerEvent { /** @@ -1197,6 +1222,77 @@ export namespace RealtimeServerEvent { */ type: 'conversation.item.retrieved'; } + + /** + * **WebRTC Only:** Emitted when the server begins streaming audio to the client. 
+ * This event is emitted after an audio content part has been added + * (`response.content_part.added`) to the response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStarted { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.started`. + */ + type: 'output_audio_buffer.started'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer has been completely + * drained on the server, and no more audio is forthcoming. This event is emitted + * after the full response data has been sent to the client (`response.done`). + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStopped { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.stopped`. + */ + type: 'output_audio_buffer.stopped'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens + * either in VAD mode when the user has interrupted + * (`input_audio_buffer.speech_started`), or when the client has emitted the + * `output_audio_buffer.clear` event to manually cut off the current audio + * response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferCleared { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. 
+ */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.cleared`. + */ + type: 'output_audio_buffer.cleared'; + } } /** diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 1e0077a3f..2e5ab1cc8 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -686,9 +686,7 @@ export interface ThreadCreateAndRunParamsBase { * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ - tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool - > | null; + tools?: Array | null; /** * An alternative to sampling with temperature, called nucleus sampling, where the diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 84ff6d1bb..caef7acc0 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; +import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, @@ -107,7 +108,7 @@ export interface EvalCustomDataSourceConfig { * the evaluation. */ export interface EvalLabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. @@ -136,57 +137,43 @@ export interface EvalLabelModelGrader { } export namespace EvalLabelModelGrader { - export interface InputMessage { - content: InputMessage.Content; - + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. One of `user`, `system`, or `developer`. + * Text inputs to the model - can contain template strings. */ - role: 'user' | 'system' | 'developer'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface Assistant { - content: Assistant.Content; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The role of the message. Must be `assistant` for output. + * The type of the message input. Always `message`. */ - role: 'assistant'; + type?: 'message'; + } + export namespace Input { /** - * The type of item, which is always `message`. + * A text output from the model. */ - type: 'message'; - } - - export namespace Assistant { - export interface Content { + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. */ type: 'output_text'; } @@ -259,8 +246,8 @@ export interface EvalStringCheckGrader { */ export interface EvalTextSimilarityGrader { /** - * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, - * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. 
*/ evaluation_metric: | 'fuzzy_match' @@ -272,8 +259,7 @@ export interface EvalTextSimilarityGrader { | 'rouge_3' | 'rouge_4' | 'rouge_5' - | 'rouge_l' - | 'cosine'; + | 'rouge_l'; /** * The text being graded. @@ -346,14 +332,131 @@ export interface EvalCreateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateResponse.Python + | EvalCreateResponse.ScoreModel + >; +} +export namespace EvalCreateResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. */ - testing_criteria: Array; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -401,14 +504,131 @@ export interface EvalRetrieveResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalRetrieveResponse.Python + | EvalRetrieveResponse.ScoreModel + >; +} +export namespace EvalRetrieveResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. 
*/ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -456,14 +676,131 @@ export interface EvalUpdateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. 
+ */ + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalUpdateResponse.Python + | EvalUpdateResponse.ScoreModel + >; +} + +export namespace EvalUpdateResponse { + /** + * A PythonGrader object that runs a python script on the input. */ - share_with_openai: boolean; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } /** - * A list of testing criteria. + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -511,14 +848,131 @@ export interface EvalListResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalListResponse.Python + | EvalListResponse.ScoreModel + >; +} +export namespace EvalListResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. 
+ */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } export interface EvalDeleteResponse { @@ -533,12 +987,18 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs; /** * A list of graders for all eval runs in this group. 
*/ - testing_criteria: Array; + testing_criteria: Array< + | EvalCreateParams.LabelModel + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateParams.Python + | EvalCreateParams.ScoreModel + >; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -554,11 +1014,6 @@ export interface EvalCreateParams { * The name of the evaluation. */ name?: string; - - /** - * Indicates whether the evaluation is shared with OpenAI. - */ - share_with_openai?: boolean; } export namespace EvalCreateParams { @@ -572,7 +1027,7 @@ export namespace EvalCreateParams { */ export interface Custom { /** - * The json schema for the run data source items. + * The json schema for each row in the data source. */ item_schema: Record; @@ -582,7 +1037,8 @@ export namespace EvalCreateParams { type: 'custom'; /** - * Whether to include the sample schema in the data source. + * Whether the eval should expect you to populate the sample namespace (ie, by + * generating responses off of your data source) */ include_sample_schema?: boolean; } @@ -592,21 +1048,16 @@ export namespace EvalCreateParams { * completions query. This is usually metadata like `usecase=chatbot` or * `prompt-version=v2`, etc. */ - export interface StoredCompletions { + export interface Logs { /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `logs`. */ - type: 'stored_completions'; + type: 'logs'; /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. + * Metadata filters for the logs data source. */ - metadata?: Shared.Metadata | null; + metadata?: Record; } /** @@ -614,7 +1065,11 @@ export namespace EvalCreateParams { * the evaluation. 
*/ export interface LabelModel { - input: Array; + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + input: Array; /** * The labels to classify to each item in the evaluation. @@ -655,57 +1110,157 @@ export namespace EvalCreateParams { role: string; } - export interface InputMessage { - content: InputMessage.Content; + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; /** - * The role of the message. One of `user`, `system`, or `developer`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - role: 'user' | 'system' | 'developer'; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The type of item, which is always `message`. + * The type of the message input. Always `message`. */ - type: 'message'; + type?: 'message'; } - export namespace InputMessage { - export interface Content { + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `input_text`. + * The type of the output text. Always `output_text`. */ - type: 'input_text'; + type: 'output_text'; } } + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. 
+ */ + name: string; - export interface OutputMessage { - content: OutputMessage.Content; + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. Must be `assistant` for output. + * Text inputs to the model - can contain template strings. */ - role: 'assistant'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - type: 'message'; + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; } - export namespace OutputMessage { - export interface Content { + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. */ type: 'output_text'; } diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index ca2b7f424..50c07a514 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; import * as Shared from '../../shared'; +import * as ResponsesAPI from '../../responses/responses'; import * as OutputItemsAPI from './output-items'; import { OutputItemListParams, @@ -83,15 +84,6 @@ export class RunListResponsesPage extends CursorPage {} * A CompletionsRunDataSource object describing a model sampling configuration. */ export interface CreateEvalCompletionsRunDataSource { - input_messages: - | CreateEvalCompletionsRunDataSource.Template - | CreateEvalCompletionsRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model: string; - /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -105,105 +97,19 @@ export interface CreateEvalCompletionsRunDataSource { */ type: 'completions'; + input_messages?: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; } export namespace CreateEvalCompletionsRunDataSource { - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - export interface InputMessage { - content: InputMessage.Content; - - /** - * The role of the message. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface OutputMessage { - content: OutputMessage.Content; - - /** - * The role of the message. Must be `assistant` for output. - */ - role: 'assistant'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace OutputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - export interface FileContent { /** * The content of the jsonl file. 
@@ -240,20 +146,25 @@ export namespace CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ export interface StoredCompletions { + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + /** * An optional Unix timestamp to filter items created after this time. */ - created_after: number | null; + created_after?: number | null; /** * An optional Unix timestamp to filter items created before this time. */ - created_before: number | null; + created_before?: number | null; /** * An optional maximum number of items to return. */ - limit: number | null; + limit?: number | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -263,17 +174,81 @@ export namespace CreateEvalCompletionsRunDataSource { * Keys are strings with a maximum length of 64 characters. Values are strings with * a maximum length of 512 characters. */ - metadata: Shared.Metadata | null; + metadata?: Shared.Metadata | null; /** * An optional model to filter by (e.g., 'gpt-4o'). */ - model: string | null; + model?: string | null; + } + export interface Template { /** - * The type of source. Always `stored_completions`. + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. */ - type: 'stored_completions'; + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Message { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Message.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Message { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; } export interface SamplingParams { @@ -378,7 +353,10 @@ export interface RunCreateResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateResponse.Completions; /** * An object representing an error response from the Eval API. @@ -442,162 +420,240 @@ export interface RunCreateResponse { } export namespace RunCreateResponse { - export interface PerModelUsage { - /** - * The number of tokens retrieved from cache. - */ - cached_tokens: number; - + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of completion tokens generated. + * A EvalResponsesSource object describing a run data source configuration. */ - completion_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of invocations. + * The type of run data source. Always `completions`. 
*/ - invocation_count: number; + type: 'completions'; - /** - * The name of the model. - */ - model_name: string; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The number of prompt tokens used. + * The name of the model to use for generating completions (e.g. "o3-mini"). */ - prompt_tokens: number; + model?: string; - /** - * The total number of tokens used. - */ - total_tokens: number; + sampling_params?: Completions.SamplingParams; } - export interface PerTestingCriteriaResult { - /** - * Number of tests failed for this criteria. - */ - failed: number; + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; - /** - * Number of tests passed for this criteria. - */ - passed: number; + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } - /** - * A description of the testing criteria. - */ - testing_criteria: string; - } + export namespace FileContent { + export interface Content { + item: Record; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - export interface ResultCounts { - /** - * Number of output items that resulted in an error. - */ - errored: number; + sample?: Record; + } + } - /** - * Number of output items that failed to pass the evaluation. - */ - failed: number; + export interface FileID { + /** + * The identifier of the file. + */ + id: string; - /** - * Number of output items that passed the evaluation. - */ - passed: number; + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } /** - * Total number of executed output items. + * A EvalResponsesSource object describing a run data source configuration. */ - total: number; - } -} + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; -/** - * A schema representing an evaluation run. 
- */ -export interface RunRetrieveResponse { - /** - * Unique identifier for the evaluation run. - */ - id: string; + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; - /** - * Unix timestamp (in seconds) when the evaluation run was created. - */ - created_at: number; + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; - /** - * Information about the run's data source. - */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; - /** - * An object representing an error response from the Eval API. - */ - error: EvalAPIError; + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; - /** - * The identifier of the associated evaluation. - */ - eval_id: string; + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; - /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. - */ - metadata: Shared.Metadata | null; + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; - /** - * The model that is evaluated, if applicable. - */ - model: string; + /** + * The name of the model to find responses for. 
This is a query parameter used to + * select responses. + */ + model?: string | null; - /** - * The name of the evaluation run. - */ - name: string; + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; - /** - * The type of the object. Always "eval.run". - */ - object: 'eval.run'; + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; - /** - * Usage statistics for each model during the evaluation run. - */ - per_model_usage: Array; + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; - /** - * Results per testing criteria applied during the evaluation run. - */ - per_testing_criteria_results: Array; + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } - /** - * The URL to the rendered evaluation run report on the UI dashboard. - */ - report_url: string; + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunRetrieveResponse.ResultCounts; + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } - /** - * The status of the evaluation run. - */ - status: string; -} + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } -export namespace RunRetrieveResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -676,7 +732,7 @@ export namespace RunRetrieveResponse { /** * A schema representing an evaluation run. */ -export interface RunListResponse { +export interface RunRetrieveResponse { /** * Unique identifier for the evaluation run. */ @@ -690,7 +746,10 @@ export interface RunListResponse { /** * Information about the run's data source. 
*/ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunRetrieveResponse.Completions; /** * An object representing an error response from the Eval API. @@ -730,12 +789,12 @@ export interface RunListResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -745,7 +804,7 @@ export interface RunListResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunListResponse.ResultCounts; + result_counts: RunRetrieveResponse.ResultCounts; /** * The status of the evaluation run. @@ -753,7 +812,241 @@ export interface RunListResponse { status: string; } -export namespace RunListResponse { +export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. 
+ */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. 
+ */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. 
+ */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -829,18 +1122,10 @@ export namespace RunListResponse { } } -export interface RunDeleteResponse { - deleted?: boolean; - - object?: string; - - run_id?: string; -} - /** * A schema representing an evaluation run. */ -export interface RunCancelResponse { +export interface RunListResponse { /** * Unique identifier for the evaluation run. */ @@ -854,7 +1139,10 @@ export interface RunCancelResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunListResponse.Completions; /** * An object representing an error response from the Eval API. @@ -894,12 +1182,12 @@ export interface RunCancelResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -909,7 +1197,7 @@ export interface RunCancelResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunCancelResponse.ResultCounts; + result_counts: RunListResponse.ResultCounts; /** * The status of the evaluation run. 
@@ -917,25 +1205,660 @@ export interface RunCancelResponse { status: string; } -export namespace RunCancelResponse { - export interface PerModelUsage { +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of tokens retrieved from cache. + * A EvalResponsesSource object describing a run data source configuration. */ - cached_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of completion tokens generated. + * The type of run data source. Always `completions`. */ - completion_tokens: number; + type: 'completions'; - /** - * The number of invocations. - */ - invocation_count: number; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The name of the model. + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). 
This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. 
+ */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCancelResponse.Completions; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. 
Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. 
+ */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. 
May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. 
+ */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. */ model_name: string; @@ -997,7 +1920,10 @@ export interface RunCreateParams { /** * Details about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateParams.CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1015,6 +1941,247 @@ export interface RunCreateParams { name?: string; } +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. 
This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. 
Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for @@ -1023,8 +2190,8 @@ export interface RunListParams extends CursorPageParams { order?: 'asc' | 'desc'; /** - * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | - * "canceled". + * Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + * | `canceled`. 
*/ status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; } diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts index 500c3de81..e808b2001 100644 --- a/src/resources/fine-tuning/checkpoints/permissions.ts +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -61,9 +61,13 @@ export class Permissions extends APIResource { */ del( fineTunedModelCheckpoint: string, + permissionId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.delete(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, options); + return this._client.delete( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions/${permissionId}`, + options, + ); } } diff --git a/src/resources/images.ts b/src/resources/images.ts index 8e1c6d92e..de1882d30 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -5,7 +5,7 @@ import * as Core from '../core'; export class Images extends APIResource { /** - * Creates a variation of a given image. + * Creates a variation of a given image. This endpoint only supports `dall-e-2`. */ createVariation( body: ImageCreateVariationParams, @@ -15,7 +15,8 @@ export class Images extends APIResource { } /** - * Creates an edited or extended image given an original image and a prompt. + * Creates an edited or extended image given one or more source images and a + * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. */ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); @@ -23,6 +24,7 @@ export class Images extends APIResource { /** * Creates an image given a prompt. + * [Learn more](https://platform.openai.com/docs/guides/images). 
*/ generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/generations', { body, ...options }); @@ -30,33 +32,93 @@ export class Images extends APIResource { } /** - * Represents the url or the content of an image generated by the OpenAI API. + * Represents the content or the URL of an image generated by the OpenAI API. */ export interface Image { /** - * The base64-encoded JSON of the generated image, if `response_format` is - * `b64_json`. + * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + * and only present if `response_format` is set to `b64_json` for `dall-e-2` and + * `dall-e-3`. */ b64_json?: string; /** - * The prompt that was used to generate the image, if there was any revision to the - * prompt. + * For `dall-e-3` only, the revised prompt that was used to generate the image. */ revised_prompt?: string; /** - * The URL of the generated image, if `response_format` is `url` (default). + * When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + * `response_format` is set to `url` (default value). Unsupported for + * `gpt-image-1`. */ url?: string; } -export type ImageModel = 'dall-e-2' | 'dall-e-3'; +export type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1'; +/** + * The response from the image generation endpoint. + */ export interface ImagesResponse { + /** + * The Unix timestamp (in seconds) of when the image was created. + */ created: number; - data: Array; + /** + * The list of generated images. + */ + data?: Array; + + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + usage?: ImagesResponse.Usage; +} + +export namespace ImagesResponse { + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + export interface Usage { + /** + * The number of tokens (images and text) in the input prompt. 
+ */ + input_tokens: number; + + /** + * The input tokens detailed information for the image generation. + */ + input_tokens_details: Usage.InputTokensDetails; + + /** + * The number of image tokens in the output image. + */ + output_tokens: number; + + /** + * The total number of tokens (images and text) used for the image generation. + */ + total_tokens: number; + } + + export namespace Usage { + /** + * The input tokens detailed information for the image generation. + */ + export interface InputTokensDetails { + /** + * The number of image tokens in the input prompt. + */ + image_tokens: number; + + /** + * The number of text tokens in the input prompt. + */ + text_tokens: number; + } + } } export interface ImageCreateVariationParams { @@ -73,8 +135,7 @@ export interface ImageCreateVariationParams { model?: (string & {}) | ImageModel | null; /** - * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - * `n=1` is supported. + * The number of images to generate. Must be between 1 and 10. */ n?: number | null; @@ -101,27 +162,31 @@ export interface ImageCreateVariationParams { export interface ImageEditParams { /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - * is not provided, image must have transparency, which will be used as the mask. + * The image(s) to edit. Must be a supported image file or an array of images. For + * `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. For `dall-e-2`, you can only provide one image, and it should be a square + * `png` file less than 4MB. */ - image: Core.Uploadable; + image: Core.Uploadable | Array; /** * A text description of the desired image(s). The maximum length is 1000 - * characters. + * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. */ prompt: string; /** * An additional image whose fully transparent areas (e.g. where alpha is zero) - * indicate where `image` should be edited. 
Must be a valid PNG file, less than + * indicate where `image` should be edited. If there are multiple images provided, + * the mask will be applied on the first image. Must be a valid PNG file, less than * 4MB, and have the same dimensions as `image`. */ mask?: Core.Uploadable; /** - * The model to use for image generation. Only `dall-e-2` is supported at this - * time. + * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + * is used. */ model?: (string & {}) | ImageModel | null; @@ -130,16 +195,25 @@ export interface ImageEditParams { */ n?: number | null; + /** + * The quality of the image that will be generated. `high`, `medium` and `low` are + * only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + * Defaults to `auto`. + */ + quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null; + /** * The format in which the generated images are returned. Must be one of `url` or * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + * will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024`. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ size?: '256x256' | '512x512' | '1024x1024' | null; @@ -153,16 +227,36 @@ export interface ImageEditParams { export interface ImageGenerateParams { /** - * A text description of the desired image(s). The maximum length is 1000 - * characters for `dall-e-2` and 4000 characters for `dall-e-3`. + * A text description of the desired image(s). 
The maximum length is 32000 + * characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + * for `dall-e-3`. */ prompt: string; /** - * The model to use for image generation. + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. + * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + + /** + * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + * `gpt-image-1` is used. */ model?: (string & {}) | ImageModel | null; + /** + * Control the content-moderation level for images generated by `gpt-image-1`. Must + * be either `low` for less restrictive filtering or `auto` (default value). + */ + moderation?: 'low' | 'auto' | null; + /** * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only * `n=1` is supported. @@ -170,31 +264,59 @@ export interface ImageGenerateParams { n?: number | null; /** - * The quality of the image that will be generated. `hd` creates images with finer - * details and greater consistency across the image. This param is only supported - * for `dall-e-3`. + * The compression level (0-100%) for the generated images. This parameter is only + * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + * defaults to 100. */ - quality?: 'standard' | 'hd'; + output_compression?: number | null; /** - * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. 
+ * The format in which the generated images are returned. This parameter is only + * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + */ + output_format?: 'png' | 'jpeg' | 'webp' | null; + + /** + * The quality of the image that will be generated. + * + * - `auto` (default value) will automatically select the best quality for the + * given model. + * - `high`, `medium` and `low` are supported for `gpt-image-1`. + * - `hd` and `standard` are supported for `dall-e-3`. + * - `standard` is the only option for `dall-e-2`. + */ + quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null; + + /** + * The format in which generated images with `dall-e-2` and `dall-e-3` are + * returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + * after the image has been generated. This parameter isn't supported for + * `gpt-image-1` which will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - * `1024x1792` for `dall-e-3` models. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + * one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. */ - size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null; + size?: + | 'auto' + | '1024x1024' + | '1536x1024' + | '1024x1536' + | '256x256' + | '512x512' + | '1792x1024' + | '1024x1792' + | null; /** - * The style of the generated images. Must be one of `vivid` or `natural`. Vivid - * causes the model to lean towards generating hyper-real and dramatic images. - * Natural causes the model to produce more natural, less hyper-real looking - * images. 
This param is only supported for `dall-e-3`. + * The style of the generated images. This parameter is only supported for + * `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + * towards generating hyper-real and dramatic images. Natural causes the model to + * produce more natural, less hyper-real looking images. */ style?: 'vivid' | 'natural' | null; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 52dd079fc..771b8daf2 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2158,6 +2158,160 @@ export namespace ResponseReasoningItem { } } +/** + * Emitted when a new reasoning summary part is added. + */ +export interface ResponseReasoningSummaryPartAddedEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The summary part that was added. + */ + part: ResponseReasoningSummaryPartAddedEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.added`. + */ + type: 'response.reasoning_summary_part.added'; +} + +export namespace ResponseReasoningSummaryPartAddedEvent { + /** + * The summary part that was added. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a reasoning summary part is completed. + */ +export interface ResponseReasoningSummaryPartDoneEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The completed summary part. 
+ */ + part: ResponseReasoningSummaryPartDoneEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.done`. + */ + type: 'response.reasoning_summary_part.done'; +} + +export namespace ResponseReasoningSummaryPartDoneEvent { + /** + * The completed summary part. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a delta is added to a reasoning summary text. + */ +export interface ResponseReasoningSummaryTextDeltaEvent { + /** + * The text delta that was added to the summary. + */ + delta: string; + + /** + * The ID of the item this summary text delta is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text delta is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_text.delta`. + */ + type: 'response.reasoning_summary_text.delta'; +} + +/** + * Emitted when a reasoning summary text is completed. + */ +export interface ResponseReasoningSummaryTextDoneEvent { + /** + * The ID of the item this summary text is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The full text of the completed reasoning summary. + */ + text: string; + + /** + * The type of the event. Always `response.reasoning_summary_text.done`. + */ + type: 'response.reasoning_summary_text.done'; +} + /** * Emitted when there is a partial refusal text. 
*/ @@ -2252,6 +2406,10 @@ export type ResponseStreamEvent = | ResponseIncompleteEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent + | ResponseReasoningSummaryPartAddedEvent + | ResponseReasoningSummaryPartDoneEvent + | ResponseReasoningSummaryTextDeltaEvent + | ResponseReasoningSummaryTextDoneEvent | ResponseRefusalDeltaEvent | ResponseRefusalDoneEvent | ResponseTextAnnotationDeltaEvent @@ -2967,6 +3125,10 @@ export declare namespace Responses { type ResponseOutputRefusal as ResponseOutputRefusal, type ResponseOutputText as ResponseOutputText, type ResponseReasoningItem as ResponseReasoningItem, + type ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent, + type ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, + type ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent, + type ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent, type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent, type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, type ResponseStatus as ResponseStatus, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index fabc2602a..45d1c4f9b 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -47,7 +47,6 @@ describe('resource evals', () => { ], metadata: { foo: 'string' }, name: 'name', - share_with_openai: true, }); }); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index e7aceae3e..1e4b40a94 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -61,10 +61,10 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - // OpenAPI spec is slightly incorrect - test.skip('del', async () => { + test('del', 
async () => { const responsePromise = client.fineTuning.checkpoints.permissions.del( 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', ); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -75,13 +75,14 @@ describe('resource permissions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // OpenAPI spec is slightly incorrect - test.skip('del: request options instead of params are passed correctly', async () => { + test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { - path: '/_stainless_unknown_path', - }), + client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 4f15e20ac..e9b460254 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -54,6 +54,7 @@ describe('resource images', () => { mask: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'string', n: 1, + quality: 'high', response_format: 'url', size: '1024x1024', user: 'user-1234', @@ -74,9 +75,13 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', + background: 'transparent', model: 'string', + moderation: 'low', n: 1, - quality: 'standard', + output_compression: 100, + output_format: 'png', + quality: 'medium', response_format: 'url', size: '1024x1024', style: 'vivid', From 
f99e7c3ca9348a8611665b54a68a9e377dc61ea9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 16:32:08 +0000 Subject: [PATCH 224/246] release: 4.96.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2f61d58b0..5b0015f5b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.95.1" + ".": "4.96.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f864e203..47717a4eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.96.0 (2025-04-23) + +Full Changelog: [v4.95.1...v4.96.0](https://github.com/openai/openai-node/compare/v4.95.1...v4.96.0) + +### Features + +* **api:** adding new image model support ([a00d331](https://github.com/openai/openai-node/commit/a00d33190edd08df7d9c088c00ab7b77673f88ba)) + + +### Bug Fixes + +* **types:** export AssistantStream ([#1472](https://github.com/openai/openai-node/issues/1472)) ([626c844](https://github.com/openai/openai-node/commit/626c844a758a68ffbff48873d4773be2e3868952)) + + +### Chores + +* **ci:** add timeout thresholds for CI jobs ([e465063](https://github.com/openai/openai-node/commit/e46506351097f1de39c866c28b6ec20fa724fc36)) + ## 4.95.1 (2025-04-18) Full Changelog: [v4.95.0...v4.95.1](https://github.com/openai/openai-node/compare/v4.95.0...v4.95.1) diff --git a/jsr.json b/jsr.json index 8271c8522..6b574ce15 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.95.1", + "version": "4.96.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 76fe7d4d0..7b4e86f8e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - 
"version": "4.95.1", + "version": "4.96.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index cd1995322..1215a5e79 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.95.1'; // x-release-please-version +export const VERSION = '4.96.0'; // x-release-please-version From ead76fc6429ac52a1c8b008ac5c0afcefaa0bae5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:55:23 +0000 Subject: [PATCH 225/246] chore(ci): run on more branches and use depot runners --- .github/workflows/ci.yml | 20 ++++++++++---------- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- .github/workflows/release-doctor.yml | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0aac41b2..9b293f4c2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,18 +1,18 @@ name: CI on: push: - branches: - - master - pull_request: - branches: - - master - - next + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: timeout-minutes: 10 name: lint - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -30,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read id-token: write @@ -65,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -83,7 +83,7 @@ jobs: examples: timeout-minutes: 10 name: examples - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: github.repository == 'openai/openai-node' steps: diff --git a/.github/workflows/publish-jsr.yml 
b/.github/workflows/publish-jsr.yml index 1e46d6bfb..efb18bb16 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read id-token: write diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..cf1d07e09 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 environment: publish steps: diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 37bc09e80..1c794642c 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -8,7 +8,7 @@ on: jobs: release_doctor: name: release doctor - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 environment: publish if: github.repository == 'openai/openai-node' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') From 214da398c76f46d40994665f3ca7e10e203e9579 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 19:58:52 +0000 Subject: [PATCH 226/246] chore(ci): only use depot for staging repos --- .github/workflows/ci.yml | 8 ++++---- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- .github/workflows/release-doctor.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b293f4c2..49a043930 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 
'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -30,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} permissions: contents: read id-token: write @@ -65,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -83,7 +83,7 @@ jobs: examples: timeout-minutes: 10 name: examples - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} if: github.repository == 'openai/openai-node' steps: diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index efb18bb16..1e46d6bfb 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest permissions: contents: read id-token: write diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index cf1d07e09..5a3711b53 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest environment: publish steps: diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 1c794642c..37bc09e80 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -8,7 +8,7 @@ on: jobs: release_doctor: name: release doctor - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest environment: publish if: github.repository == 'openai/openai-node' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || 
github.head_ref == 'next') From 3e7c92c8a76c1f747610d63d9d69a88b796ee9fc Mon Sep 17 00:00:00 2001 From: Isaac Batista Date: Mon, 28 Apr 2025 12:31:09 -0300 Subject: [PATCH 227/246] fix(types): export ParseableToolsParams (#1486) --- src/lib/ResponsesParser.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts index 8d762d5bb..c64c6ffa0 100644 --- a/src/lib/ResponsesParser.ts +++ b/src/lib/ResponsesParser.ts @@ -14,7 +14,7 @@ import { } from '../resources/responses/responses'; import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; -type ParseableToolsParams = Array | ChatCompletionTool | null; +export type ParseableToolsParams = Array | ChatCompletionTool | null; export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { tools?: ParseableToolsParams; From 593fea4f86d46035540f5a34314d6f4b31960dd2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 05:07:54 +0000 Subject: [PATCH 228/246] release: 4.96.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5b0015f5b..20d7ece71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.0" + ".": "4.96.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 47717a4eb..3a78e5928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.96.1 (2025-04-29) + +Full Changelog: [v4.96.0...v4.96.1](https://github.com/openai/openai-node/compare/v4.96.0...v4.96.1) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) 
([eb055b2](https://github.com/openai/openai-node/commit/eb055b26ce90e5fe1b101a95a4390956d519e168)) + + +### Chores + +* **ci:** only use depot for staging repos ([e80af47](https://github.com/openai/openai-node/commit/e80af47590056baa8f456e8d60c37f0d00ff08c4)) +* **ci:** run on more branches and use depot runners ([b04a801](https://github.com/openai/openai-node/commit/b04a801d0356105eacddbb4d10f4359719585dd6)) + ## 4.96.0 (2025-04-23) Full Changelog: [v4.95.1...v4.96.0](https://github.com/openai/openai-node/compare/v4.95.1...v4.96.0) diff --git a/jsr.json b/jsr.json index 6b574ce15..73ea2185e 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.0", + "version": "4.96.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 7b4e86f8e..54046d4f2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.0", + "version": "4.96.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 1215a5e79..45539224e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.0'; // x-release-please-version +export const VERSION = '4.96.1'; // x-release-please-version From 37ab638baee4f68f3149f4a4f96a136f32966739 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 17:08:58 +0000 Subject: [PATCH 229/246] release: 4.96.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 20d7ece71..88f780d30 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.1" + 
".": "4.96.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a78e5928..d724d8922 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.96.2 (2025-04-29) + +Full Changelog: [v4.96.1...v4.96.2](https://github.com/openai/openai-node/compare/v4.96.1...v4.96.2) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) ([3e7c92c](https://github.com/openai/openai-node/commit/3e7c92c8a76c1f747610d63d9d69a88b796ee9fc)) + + +### Chores + +* **ci:** only use depot for staging repos ([214da39](https://github.com/openai/openai-node/commit/214da398c76f46d40994665f3ca7e10e203e9579)) +* **ci:** run on more branches and use depot runners ([ead76fc](https://github.com/openai/openai-node/commit/ead76fc6429ac52a1c8b008ac5c0afcefaa0bae5)) + ## 4.96.1 (2025-04-29) Full Changelog: [v4.96.0...v4.96.1](https://github.com/openai/openai-node/compare/v4.96.0...v4.96.1) diff --git a/jsr.json b/jsr.json index 73ea2185e..8eca06e74 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.1", + "version": "4.96.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 54046d4f2..d563394c0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.1", + "version": "4.96.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 45539224e..1674d74fe 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.1'; // x-release-please-version +export const VERSION = '4.96.2'; // x-release-please-version From 0989ddcfd5ed0a149bbc67d61f93e0f49c397c72 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 15:45:48 +0000 Subject: [PATCH 230/246] 
docs(readme): fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8515c81ed..bbf72226a 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ async function main() { main(); ``` -Error codes are as followed: +Error codes are as follows: | Status Code | Error Type | | ----------- | -------------------------- | From 995075b632051b5bb33c0381056107b2fe93931e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 22:11:59 +0000 Subject: [PATCH 231/246] chore(docs): add missing deprecation warnings --- src/resources/chat/completions/completions.ts | 8 ++++---- src/resources/fine-tuning/jobs/jobs.ts | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 17edac02c..251020337 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -1169,7 +1169,7 @@ export interface ChatCompletionCreateParamsBase { frequency_penalty?: number | null; /** - * Deprecated in favor of `tool_choice`. + * @deprecated Deprecated in favor of `tool_choice`. * * Controls which (if any) function is called by the model. * @@ -1187,7 +1187,7 @@ export interface ChatCompletionCreateParamsBase { function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption; /** - * Deprecated in favor of `tools`. + * @deprecated Deprecated in favor of `tools`. * * A list of functions the model may generate JSON inputs for. */ @@ -1220,8 +1220,8 @@ export interface ChatCompletionCreateParamsBase { max_completion_tokens?: number | null; /** - * The maximum number of [tokens](/tokenizer) that can be generated in the chat - * completion. This value can be used to control + * @deprecated The maximum number of [tokens](/tokenizer) that can be generated in + * the chat completion. 
This value can be used to control * [costs](https://openai.com/api/pricing/) for text generated via API. * * This value is now deprecated in favor of `max_completion_tokens`, and is not diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 9be03c302..2198e8174 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -466,8 +466,9 @@ export interface JobCreateParams { training_file: string; /** - * The hyperparameters used for the fine-tuning job. This value is now deprecated - * in favor of `method`, and should be passed in under the `method` parameter. + * @deprecated The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ hyperparameters?: JobCreateParams.Hyperparameters; From dfbdc65d3ed17f0063d02906239371b88e04e5fd Mon Sep 17 00:00:00 2001 From: mini-peanut Date: Fri, 2 May 2025 09:00:11 +0800 Subject: [PATCH 232/246] docs: fix "procesing" -> "processing" in realtime examples (#1406) --- examples/azure/realtime/websocket.ts | 4 ++-- examples/azure/realtime/ws.ts | 4 ++-- examples/realtime/websocket.ts | 4 ++-- examples/realtime/ws.ts | 4 ++-- realtime.md | 7 +++---- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts index bec74e654..4175b4a71 100644 --- a/examples/azure/realtime/websocket.ts +++ b/examples/azure/realtime/websocket.ts @@ -40,7 +40,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -57,4 +57,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end 
of file diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index 6ab7b742a..e86a79092 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -40,7 +40,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -57,4 +57,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts index 0da131bc3..f1c46dd41 100644 --- a/examples/realtime/websocket.ts +++ b/examples/realtime/websocket.ts @@ -28,7 +28,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -45,4 +45,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 08c6fbcb6..1ce6b2045 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -28,7 +28,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -45,4 +45,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/realtime.md b/realtime.md index 2fcd17e9e..7e8d84a3c 100644 --- a/realtime.md +++ 
b/realtime.md @@ -39,7 +39,7 @@ rt.socket.on('open', () => { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -80,8 +80,7 @@ It is **highly recommended** that you register an `error` event listener and han const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); -``` - +``` \ No newline at end of file From 31cd88fae84f630c8e86e1acab6c4cd9283c886c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 19:09:26 +0000 Subject: [PATCH 233/246] feat(api): add image sizes, reasoning encryption --- .stats.yml | 6 +-- src/resources/audio/speech.ts | 2 +- src/resources/images.ts | 24 +++++++-- src/resources/responses/responses.ts | 79 ++++++++++++++++++---------- tests/api-resources/images.test.ts | 1 + 5 files changed, 74 insertions(+), 38 deletions(-) diff --git a/.stats.yml b/.stats.yml index d92408173..0c8278866 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml -openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 -config_hash: b597cd9a31e9e5ec709e2eefb4c54122 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml +openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6 +config_hash: d9b6b6e6bc85744663e300eebc482067 diff --git 
a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 4b99ee5f4..e218c8299 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -66,7 +66,7 @@ export interface SpeechCreateParams { /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - * the default. + * the default. Does not work with `gpt-4o-mini-tts`. */ speed?: number; } diff --git a/src/resources/images.ts b/src/resources/images.ts index de1882d30..32f1e123c 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -162,10 +162,13 @@ export interface ImageCreateVariationParams { export interface ImageEditParams { /** - * The image(s) to edit. Must be a supported image file or an array of images. For - * `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - * 25MB. For `dall-e-2`, you can only provide one image, and it should be a square - * `png` file less than 4MB. + * The image(s) to edit. Must be a supported image file or an array of images. + * + * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. You can provide up to 16 images. + * + * For `dall-e-2`, you can only provide one image, and it should be a square `png` + * file less than 4MB. */ image: Core.Uploadable | Array; @@ -175,6 +178,17 @@ export interface ImageEditParams { */ prompt: string; + /** + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. + * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + /** * An additional image whose fully transparent areas (e.g. 
where alpha is zero) * indicate where `image` should be edited. If there are multiple images provided, @@ -215,7 +229,7 @@ export interface ImageEditParams { * (landscape), `1024x1536` (portrait), or `auto` (default value) for * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ - size?: '256x256' | '512x512' | '1024x1024' | null; + size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto' | null; /** * A unique identifier representing your end-user, which can help OpenAI to monitor diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 771b8daf2..0a6e3666d 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -158,7 +158,7 @@ export interface ComputerTool { /** * The type of computer environment to control. */ - environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser'; /** * The type of the computer use tool. Always `computer_use_preview`. @@ -209,9 +209,9 @@ export interface FileSearchTool { vector_store_ids: Array; /** - * A filter to apply based on file attributes. + * A filter to apply. */ - filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null; /** * The maximum number of results to return. This number should be between 1 and 50 @@ -258,12 +258,12 @@ export interface FunctionTool { /** * A JSON schema object describing the parameters of the function. */ - parameters: Record; + parameters: Record | null; /** * Whether to enforce strict parameter validation. Default `true`. */ - strict: boolean; + strict: boolean | null; /** * The type of the function tool. Always `function`. @@ -1581,11 +1581,17 @@ export interface ResponseInProgressEvent { * - `message.input_image.image_url`: Include image urls from the input message. 
* - `computer_call_output.output.image_url`: Include image urls from the computer * call output. + * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + * tokens in reasoning item outputs. This enables reasoning items to be used in + * multi-turn conversations when using the Responses API statelessly (like when + * the `store` parameter is set to `false`, or when an organization is enrolled + * in the zero data retention program). */ export type ResponseIncludable = | 'file_search_call.results' | 'message.input_image.image_url' - | 'computer_call_output.output.image_url'; + | 'computer_call_output.output.image_url' + | 'reasoning.encrypted_content'; /** * An event that is emitted when a response finishes as incomplete. @@ -1650,7 +1656,7 @@ export interface ResponseInputFile { /** * The ID of the file to be sent to the model. */ - file_id?: string; + file_id?: string | null; /** * The name of the file to be sent to the model. @@ -1667,7 +1673,7 @@ export interface ResponseInputImage { * The detail level of the image to be sent to the model. One of `high`, `low`, or * `auto`. Defaults to `auto`. */ - detail: 'high' | 'low' | 'auto'; + detail: 'low' | 'high' | 'auto'; /** * The type of the input item. Always `input_image`. @@ -1758,19 +1764,19 @@ export namespace ResponseInputItem { /** * The ID of the computer tool call output. */ - id?: string; + id?: string | null; /** * The safety checks reported by the API that have been acknowledged by the * developer. */ - acknowledged_safety_checks?: Array; + acknowledged_safety_checks?: Array | null; /** * The status of the message input. One of `in_progress`, `completed`, or * `incomplete`. Populated when input items are returned via API. */ - status?: 'in_progress' | 'completed' | 'incomplete'; + status?: 'in_progress' | 'completed' | 'incomplete' | null; } export namespace ComputerCallOutput { @@ -1786,12 +1792,12 @@ export namespace ResponseInputItem { /** * The type of the pending safety check. 
*/ - code: string; + code?: string | null; /** * Details about the pending safety check. */ - message: string; + message?: string | null; } } @@ -1818,13 +1824,13 @@ export namespace ResponseInputItem { * The unique ID of the function tool call output. Populated when this item is * returned via API. */ - id?: string; + id?: string | null; /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. */ - status?: 'in_progress' | 'completed' | 'incomplete'; + status?: 'in_progress' | 'completed' | 'incomplete' | null; } /** @@ -1839,7 +1845,7 @@ export namespace ResponseInputItem { /** * The type of item to reference. Always `item_reference`. */ - type: 'item_reference'; + type?: 'item_reference' | null; } } @@ -2119,7 +2125,9 @@ export namespace ResponseOutputText { /** * A description of the chain of thought used by a reasoning model while generating - * a response. + * a response. Be sure to include these items in your `input` to the Responses API + * for subsequent turns of a conversation if you are manually + * [managing context](https://platform.openai.com/docs/guides/conversation-state). */ export interface ResponseReasoningItem { /** @@ -2137,6 +2145,12 @@ export interface ResponseReasoningItem { */ type: 'reasoning'; + /** + * The encrypted content of the reasoning item - populated when a response is + * generated with `reasoning.encrypted_content` in the `include` parameter. + */ + encrypted_content?: string | null; + /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. @@ -2730,11 +2744,9 @@ export interface ResponseWebSearchCallSearchingEvent { } /** - * A tool that searches for relevant content from uploaded files. Learn more about - * the - * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + * A tool that can be used to generate a response. 
*/ -export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool; +export type Tool = FileSearchTool | FunctionTool | WebSearchTool | ComputerTool; /** * Use this option to force the model to call a specific function. @@ -2788,10 +2800,8 @@ export interface ToolChoiceTypes { */ export interface WebSearchTool { /** - * The type of the web search tool. One of: - * - * - `web_search_preview` - * - `web_search_preview_2025_03_11` + * The type of the web search tool. One of `web_search_preview` or + * `web_search_preview_2025_03_11`. */ type: 'web_search_preview' | 'web_search_preview_2025_03_11'; @@ -2801,10 +2811,16 @@ export interface WebSearchTool { */ search_context_size?: 'low' | 'medium' | 'high'; + /** + * The user's location. + */ user_location?: WebSearchTool.UserLocation | null; } export namespace WebSearchTool { + /** + * The user's location. + */ export interface UserLocation { /** * The type of location approximation. Always `approximate`. @@ -2814,24 +2830,24 @@ export namespace WebSearchTool { /** * Free text input for the city of the user, e.g. `San Francisco`. */ - city?: string; + city?: string | null; /** * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of * the user, e.g. `US`. */ - country?: string; + country?: string | null; /** * Free text input for the region of the user, e.g. `California`. */ - region?: string; + region?: string | null; /** * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the * user, e.g. `America/Los_Angeles`. */ - timezone?: string; + timezone?: string | null; } } @@ -2869,6 +2885,11 @@ export interface ResponseCreateParamsBase { * - `message.input_image.image_url`: Include image urls from the input message. * - `computer_call_output.output.image_url`: Include image urls from the computer * call output. + * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + * tokens in reasoning item outputs. 
This enables reasoning items to be used in + * multi-turn conversations when using the Responses API statelessly (like when + * the `store` parameter is set to `false`, or when an organization is enrolled + * in the zero data retention program). */ include?: Array | null; diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index e9b460254..04fca0a2a 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -51,6 +51,7 @@ describe('resource images', () => { const response = await client.images.edit({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', + background: 'transparent', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'string', n: 1, From 5bb454391f34c6c0d9e8b3b22d0e407c31641bfa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 19:10:25 +0000 Subject: [PATCH 234/246] release: 4.97.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 88f780d30..4e19f03d6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.2" + ".": "4.97.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d724d8922..6fa637742 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.97.0 (2025-05-02) + +Full Changelog: [v4.96.2...v4.97.0](https://github.com/openai/openai-node/compare/v4.96.2...v4.97.0) + +### Features + +* **api:** add image sizes, reasoning encryption ([9c2113a](https://github.com/openai/openai-node/commit/9c2113af7c7ea9a797a0e39d07d9ad8627c96acb)) + + +### Chores + +* **docs:** add missing deprecation warnings 
([253392c](https://github.com/openai/openai-node/commit/253392c93adca88e0ee83f784183b2128ff64a16)) + + +### Documentation + +* fix "procesing" -> "processing" in realtime examples ([#1406](https://github.com/openai/openai-node/issues/1406)) ([8717b9f](https://github.com/openai/openai-node/commit/8717b9fce87d03e51d40ee58f5d6259408405e1f)) +* **readme:** fix typo ([cab3478](https://github.com/openai/openai-node/commit/cab3478f195f9de5c21033a1b3684f52ad347ffc)) + ## 4.96.2 (2025-04-29) Full Changelog: [v4.96.1...v4.96.2](https://github.com/openai/openai-node/compare/v4.96.1...v4.96.2) diff --git a/jsr.json b/jsr.json index 8eca06e74..fd3ca4a41 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.2", + "version": "4.97.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d563394c0..8b9281b35 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.2", + "version": "4.97.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 1674d74fe..97cbc5900 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.2'; // x-release-please-version +export const VERSION = '4.97.0'; // x-release-please-version From 6469d5323b653f19e90a7470d81c914c640c6f8b Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 5 May 2025 09:32:07 +0100 Subject: [PATCH 235/246] chore(internal): fix formatting --- examples/azure/realtime/websocket.ts | 2 +- examples/azure/realtime/ws.ts | 2 +- examples/realtime/websocket.ts | 2 +- examples/realtime/ws.ts | 2 +- examples/tsconfig.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts index 4175b4a71..91fe3b7b9 100644 --- a/examples/azure/realtime/websocket.ts 
+++ b/examples/azure/realtime/websocket.ts @@ -57,4 +57,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index e86a79092..8b22aeef0 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -57,4 +57,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts index f1c46dd41..6fb4740af 100644 --- a/examples/realtime/websocket.ts +++ b/examples/realtime/websocket.ts @@ -45,4 +45,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 1ce6b2045..6cc950b76 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -45,4 +45,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/tsconfig.json b/examples/tsconfig.json index 6c3477462..3c43903cf 100644 --- a/examples/tsconfig.json +++ b/examples/tsconfig.json @@ -1,3 +1,3 @@ { - "extends": "../tsconfig.json" + "extends": "../tsconfig.json" } From bbf5d45259a8bfba62e2217955597ec0f6cfead4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 19:19:08 +0000 Subject: [PATCH 236/246] chore(ci): bump node version for release workflows --- .github/workflows/ci.yml | 4 ++-- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 49a043930..09f1636b6 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,7 +72,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '18' + node-version: '20' - name: Bootstrap run: ./scripts/bootstrap @@ -92,7 +92,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | yarn install diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index 1e46d6bfb..e74673c1f 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..0662a79c5 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | From e8d2092e51015b05fe7ef33ef5a9d7652846b137 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 22:24:41 +0000 Subject: [PATCH 237/246] docs: add examples to tsdocs --- src/resources/audio/speech.ts | 12 +++++ src/resources/audio/transcriptions.ts | 9 ++++ src/resources/audio/translations.ts | 8 +++ src/resources/beta/assistants.ts | 36 +++++++++++++ src/resources/beta/realtime/sessions.ts | 6 +++ .../beta/realtime/transcription-sessions.ts | 6 +++ src/resources/beta/threads/messages.ts | 43 +++++++++++++++ src/resources/beta/threads/runs/runs.ts | 52 +++++++++++++++++++ src/resources/beta/threads/runs/steps.ts | 21 ++++++++ src/resources/beta/threads/threads.ts | 33 ++++++++++++ src/resources/chat/completions/completions.ts | 38 ++++++++++++++ src/resources/chat/completions/messages.ts | 10 ++++ 
src/resources/completions.ts | 8 +++ src/resources/embeddings.ts | 9 ++++ .../fine-tuning/checkpoints/permissions.ts | 28 ++++++++++ src/resources/fine-tuning/jobs/checkpoints.ts | 10 ++++ src/resources/fine-tuning/jobs/jobs.ts | 40 ++++++++++++++ src/resources/images.ts | 22 ++++++++ src/resources/responses/input-items.ts | 10 ++++ src/resources/responses/responses.ts | 22 ++++++++ 20 files changed, 423 insertions(+) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index e218c8299..ccd37c092 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -7,6 +7,18 @@ import { type Response } from '../../_shims/index'; export class Speech extends APIResource { /** * Generates audio from the input text. + * + * @example + * ```ts + * const speech = await client.audio.speech.create({ + * input: 'input', + * model: 'string', + * voice: 'ash', + * }); + * + * const content = await speech.blob(); + * console.log(content); + * ``` */ create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/audio/speech', { diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index ba4fec6c5..8d563e0ba 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -9,6 +9,15 @@ import { Stream } from '../../streaming'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. 
+ * + * @example + * ```ts + * const transcription = + * await client.audio.transcriptions.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'gpt-4o-transcribe', + * }); + * ``` */ create( body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index df312f876..1edb71a7d 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -8,6 +8,14 @@ import * as TranscriptionsAPI from './transcriptions'; export class Translations extends APIResource { /** * Translates audio into English. + * + * @example + * ```ts + * const translation = await client.audio.translations.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'whisper-1', + * }); + * ``` */ create( body: TranslationCreateParams<'json' | undefined>, diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 00a6ff2cf..95581bbc8 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -14,6 +14,13 @@ import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends APIResource { /** * Create an assistant with a model and instructions. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.create({ + * model: 'gpt-4o', + * }); + * ``` */ create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/assistants', { @@ -25,6 +32,13 @@ export class Assistants extends APIResource { /** * Retrieves an assistant. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.retrieve( + * 'assistant_id', + * ); + * ``` */ retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/assistants/${assistantId}`, { @@ -35,6 +49,13 @@ export class Assistants extends APIResource { /** * Modifies an assistant. 
+ * + * @example + * ```ts + * const assistant = await client.beta.assistants.update( + * 'assistant_id', + * ); + * ``` */ update( assistantId: string, @@ -50,6 +71,14 @@ export class Assistants extends APIResource { /** * Returns a list of assistants. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const assistant of client.beta.assistants.list()) { + * // ... + * } + * ``` */ list( query?: AssistantListParams, @@ -72,6 +101,13 @@ export class Assistants extends APIResource { /** * Delete an assistant. + * + * @example + * ```ts + * const assistantDeleted = await client.beta.assistants.del( + * 'assistant_id', + * ); + * ``` */ del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/assistants/${assistantId}`, { diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 28a44431e..a55a2678c 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -12,6 +12,12 @@ export class Sessions extends APIResource { * It responds with a session object, plus a `client_secret` key which contains a * usable ephemeral API token that can be used to authenticate browser clients for * the Realtime API. 
+ * + * @example + * ```ts + * const session = + * await client.beta.realtime.sessions.create(); + * ``` */ create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/realtime/sessions', { diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index a54ec1125..61e58a8e8 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -12,6 +12,12 @@ export class TranscriptionSessions extends APIResource { * It responds with a session object, plus a `client_secret` key which contains a * usable ephemeral API token that can be used to authenticate browser clients for * the Realtime API. + * + * @example + * ```ts + * const transcriptionSession = + * await client.beta.realtime.transcriptionSessions.create(); + * ``` */ create( body: TranscriptionSessionCreateParams, diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 29fd2b29f..c3834ebe6 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -10,6 +10,14 @@ import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** * Create a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.create( + * 'thread_id', + * { content: 'string', role: 'user' }, + * ); + * ``` */ create( threadId: string, @@ -25,6 +33,14 @@ export class Messages extends APIResource { /** * Retrieve a message. 
+ * + * @example + * ```ts + * const message = await client.beta.threads.messages.retrieve( + * 'thread_id', + * 'message_id', + * ); + * ``` */ retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/messages/${messageId}`, { @@ -35,6 +51,14 @@ export class Messages extends APIResource { /** * Modifies a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.update( + * 'thread_id', + * 'message_id', + * ); + * ``` */ update( threadId: string, @@ -51,6 +75,16 @@ export class Messages extends APIResource { /** * Returns a list of messages for a given thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const message of client.beta.threads.messages.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -75,6 +109,15 @@ export class Messages extends APIResource { /** * Deletes a message. + * + * @example + * ```ts + * const messageDeleted = + * await client.beta.threads.messages.del( + * 'thread_id', + * 'message_id', + * ); + * ``` */ del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/threads/${threadId}/messages/${messageId}`, { diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 15bfb4204..25356df3c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -45,6 +45,14 @@ export class Runs extends APIResource { /** * Create a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.create( + * 'thread_id', + * { assistant_id: 'assistant_id' }, + * ); + * ``` */ create( threadId: string, @@ -78,6 +86,14 @@ export class Runs extends APIResource { /** * Retrieves a run. 
+ * + * @example + * ```ts + * const run = await client.beta.threads.runs.retrieve( + * 'thread_id', + * 'run_id', + * ); + * ``` */ retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/runs/${runId}`, { @@ -88,6 +104,14 @@ export class Runs extends APIResource { /** * Modifies a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.update( + * 'thread_id', + * 'run_id', + * ); + * ``` */ update( threadId: string, @@ -104,6 +128,16 @@ export class Runs extends APIResource { /** * Returns a list of runs belonging to a thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const run of client.beta.threads.runs.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -128,6 +162,14 @@ export class Runs extends APIResource { /** * Cancels a run that is `in_progress`. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.cancel( + * 'thread_id', + * 'run_id', + * ); + * ``` */ cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, { @@ -229,6 +271,16 @@ export class Runs extends APIResource { * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the * tool calls once they're all completed. All outputs must be submitted in a single * request. 
+ * + * @example + * ```ts + * const run = + * await client.beta.threads.runs.submitToolOutputs( + * 'thread_id', + * 'run_id', + * { tool_outputs: [{}] }, + * ); + * ``` */ submitToolOutputs( threadId: string, diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c491b4e83..abd8d40ed 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -10,6 +10,16 @@ import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { /** * Retrieves a run step. + * + * @example + * ```ts + * const runStep = + * await client.beta.threads.runs.steps.retrieve( + * 'thread_id', + * 'run_id', + * 'step_id', + * ); + * ``` */ retrieve( threadId: string, @@ -43,6 +53,17 @@ export class Steps extends APIResource { /** * Returns a list of run steps belonging to a run. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const runStep of client.beta.threads.runs.steps.list( + * 'thread_id', + * 'run_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 2e5ab1cc8..c0c6bc8e4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -73,6 +73,11 @@ export class Threads extends APIResource { /** * Create a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.create(); + * ``` */ create(body?: ThreadCreateParams, options?: Core.RequestOptions): Core.APIPromise; create(options?: Core.RequestOptions): Core.APIPromise; @@ -92,6 +97,13 @@ export class Threads extends APIResource { /** * Retrieves a thread. 
+ * + * @example + * ```ts + * const thread = await client.beta.threads.retrieve( + * 'thread_id', + * ); + * ``` */ retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}`, { @@ -102,6 +114,13 @@ export class Threads extends APIResource { /** * Modifies a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.update( + * 'thread_id', + * ); + * ``` */ update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/threads/${threadId}`, { @@ -113,6 +132,13 @@ export class Threads extends APIResource { /** * Delete a thread. + * + * @example + * ```ts + * const threadDeleted = await client.beta.threads.del( + * 'thread_id', + * ); + * ``` */ del(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/threads/${threadId}`, { @@ -123,6 +149,13 @@ export class Threads extends APIResource { /** * Create a thread and run it in one request. + * + * @example + * ```ts + * const run = await client.beta.threads.createAndRun({ + * assistant_id: 'assistant_id', + * }); + * ``` */ createAndRun( body: ThreadCreateAndRunParamsNonStreaming, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 251020337..6481f8e0f 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -33,6 +33,16 @@ export class Completions extends APIResource { * supported for reasoning models are noted below. For the current state of * unsupported parameters in reasoning models, * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
+ * + * @example + * ```ts + * const chatCompletion = await client.chat.completions.create( + * { + * messages: [{ content: 'string', role: 'developer' }], + * model: 'gpt-4o', + * }, + * ); + * ``` */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -58,6 +68,12 @@ export class Completions extends APIResource { /** * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * const chatCompletion = + * await client.chat.completions.retrieve('completion_id'); + * ``` */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/chat/completions/${completionId}`, options); @@ -67,6 +83,14 @@ export class Completions extends APIResource { * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. + * + * @example + * ```ts + * const chatCompletion = await client.chat.completions.update( + * 'completion_id', + * { metadata: { foo: 'string' } }, + * ); + * ``` */ update( completionId: string, @@ -79,6 +103,14 @@ export class Completions extends APIResource { /** * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const chatCompletion of client.chat.completions.list()) { + * // ... + * } + * ``` */ list( query?: ChatCompletionListParams, @@ -98,6 +130,12 @@ export class Completions extends APIResource { /** * Delete a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. 
+ * + * @example + * ```ts + * const chatCompletionDeleted = + * await client.chat.completions.del('completion_id'); + * ``` */ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/chat/completions/${completionId}`, options); diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index 519a33aff..ab3eb73f6 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -11,6 +11,16 @@ export class Messages extends APIResource { /** * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const chatCompletionStoreMessage of client.chat.completions.messages.list( + * 'completion_id', + * )) { + * // ... + * } + * ``` */ list( completionId: string, diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 5cbec5e3c..07cb49ed9 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -10,6 +10,14 @@ import { Stream } from '../streaming'; export class Completions extends APIResource { /** * Creates a completion for the provided prompt and parameters. + * + * @example + * ```ts + * const completion = await client.completions.create({ + * model: 'string', + * prompt: 'This is a test.', + * }); + * ``` */ create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index a4be9ca3c..cc040abff 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -6,6 +6,15 @@ import * as Core from '../core'; export class Embeddings extends APIResource { /** * Creates an embedding vector representing the input text. 
+ * + * @example + * ```ts + * const createEmbeddingResponse = + * await client.embeddings.create({ + * input: 'The quick brown fox jumped over the lazy dog', + * model: 'text-embedding-3-small', + * }); + * ``` */ create( body: EmbeddingCreateParams, diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts index e808b2001..dc25bab7f 100644 --- a/src/resources/fine-tuning/checkpoints/permissions.ts +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -11,6 +11,17 @@ export class Permissions extends APIResource { * * This enables organization owners to share fine-tuned models with other projects * in their organization. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const permissionCreateResponse of client.fineTuning.checkpoints.permissions.create( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * { project_ids: ['string'] }, + * )) { + * // ... + * } + * ``` */ create( fineTunedModelCheckpoint: string, @@ -29,6 +40,14 @@ export class Permissions extends APIResource { * * Organization owners can use this endpoint to view all permissions for a * fine-tuned model checkpoint. + * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ retrieve( fineTunedModelCheckpoint: string, @@ -58,6 +77,15 @@ export class Permissions extends APIResource { * * Organization owners can use this endpoint to delete a permission for a * fine-tuned model checkpoint. 
+ * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.del( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + * ); + * ``` */ del( fineTunedModelCheckpoint: string, diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index b3018ac5f..10902e715 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -8,6 +8,16 @@ import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { /** * List checkpoints for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJobCheckpoint of client.fineTuning.jobs.checkpoints.list( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ list( fineTuningJobId: string, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 2198e8174..0bc812917 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -23,6 +23,14 @@ export class Jobs extends APIResource { * of the fine-tuned models once complete. * * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.create({ + * model: 'gpt-4o-mini', + * training_file: 'file-abc123', + * }); + * ``` */ create(body: JobCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/fine_tuning/jobs', { body, ...options }); @@ -32,6 +40,13 @@ export class Jobs extends APIResource { * Get info about a fine-tuning job. 
* * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ retrieve(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/fine_tuning/jobs/${fineTuningJobId}`, options); @@ -39,6 +54,14 @@ export class Jobs extends APIResource { /** * List your organization's fine-tuning jobs + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJob of client.fineTuning.jobs.list()) { + * // ... + * } + * ``` */ list( query?: JobListParams, @@ -57,6 +80,13 @@ export class Jobs extends APIResource { /** * Immediately cancel a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.cancel( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ cancel(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/cancel`, options); @@ -64,6 +94,16 @@ export class Jobs extends APIResource { /** * Get status updates for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJobEvent of client.fineTuning.jobs.listEvents( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ listEvents( fineTuningJobId: string, diff --git a/src/resources/images.ts b/src/resources/images.ts index 32f1e123c..c6b14833a 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -6,6 +6,13 @@ import * as Core from '../core'; export class Images extends APIResource { /** * Creates a variation of a given image. This endpoint only supports `dall-e-2`. 
+ * + * @example + * ```ts + * const imagesResponse = await client.images.createVariation({ + * image: fs.createReadStream('otter.png'), + * }); + * ``` */ createVariation( body: ImageCreateVariationParams, @@ -17,6 +24,14 @@ export class Images extends APIResource { /** * Creates an edited or extended image given one or more source images and a * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + * + * @example + * ```ts + * const imagesResponse = await client.images.edit({ + * image: fs.createReadStream('path/to/file'), + * prompt: 'A cute baby sea otter wearing a beret', + * }); + * ``` */ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); @@ -25,6 +40,13 @@ export class Images extends APIResource { /** * Creates an image given a prompt. * [Learn more](https://platform.openai.com/docs/guides/images). + * + * @example + * ```ts + * const imagesResponse = await client.images.generate({ + * prompt: 'A cute baby sea otter', + * }); + * ``` */ generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/generations', { body, ...options }); diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index c88bb441d..74707f184 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -10,6 +10,16 @@ import { type CursorPageParams } from '../../pagination'; export class InputItems extends APIResource { /** * Returns a list of input items for a given response. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const responseItem of client.responses.inputItems.list( + * 'response_id', + * )) { + * // ... 
+ * } + * ``` */ list( responseId: string, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 0a6e3666d..1440e865e 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -61,6 +61,14 @@ export class Responses extends APIResource { * [web search](https://platform.openai.com/docs/guides/tools-web-search) or * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use * your own data as input for the model's response. + * + * @example + * ```ts + * const response = await client.responses.create({ + * input: 'string', + * model: 'gpt-4o', + * }); + * ``` */ create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( @@ -90,6 +98,13 @@ export class Responses extends APIResource { /** * Retrieves a model response with the given ID. + * + * @example + * ```ts + * const response = await client.responses.retrieve( + * 'resp_677efb5139a88190b512bc3fef8e535d', + * ); + * ``` */ retrieve( responseId: string, @@ -110,6 +125,13 @@ export class Responses extends APIResource { /** * Deletes a model response with the given ID. 
+ * + * @example + * ```ts + * await client.responses.del( + * 'resp_677efb5139a88190b512bc3fef8e535d', + * ); + * ``` */ del(responseId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/responses/${responseId}`, { From fabe6ec948c08c11588f6168f0a7560bf307d780 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 May 2025 17:23:50 +0000 Subject: [PATCH 238/246] feat(api): Add reinforcement fine-tuning api support --- .stats.yml | 8 +- api.md | 43 +- src/index.ts | 11 +- src/resources/evals/evals.ts | 732 ++---------------- src/resources/evals/index.ts | 3 - src/resources/fine-tuning/alpha.ts | 3 + src/resources/fine-tuning/alpha/alpha.ts | 27 + src/resources/fine-tuning/alpha/graders.ts | 168 ++++ src/resources/fine-tuning/alpha/index.ts | 10 + src/resources/fine-tuning/fine-tuning.ts | 28 + src/resources/fine-tuning/index.ts | 10 + src/resources/fine-tuning/jobs/jobs.ts | 203 +---- src/resources/fine-tuning/methods.ts | 152 ++++ src/resources/graders.ts | 3 + src/resources/graders/grader-models.ts | 296 +++++++ src/resources/graders/graders.ts | 31 + src/resources/graders/index.ts | 12 + src/resources/index.ts | 4 +- .../fine-tuning/alpha/graders.test.ts | 53 ++ .../fine-tuning/jobs/jobs.test.ts | 56 +- 20 files changed, 1019 insertions(+), 834 deletions(-) create mode 100644 src/resources/fine-tuning/alpha.ts create mode 100644 src/resources/fine-tuning/alpha/alpha.ts create mode 100644 src/resources/fine-tuning/alpha/graders.ts create mode 100644 src/resources/fine-tuning/alpha/index.ts create mode 100644 src/resources/fine-tuning/methods.ts create mode 100644 src/resources/graders.ts create mode 100644 src/resources/graders/grader-models.ts create mode 100644 src/resources/graders/graders.ts create mode 100644 src/resources/graders/index.ts create mode 100644 tests/api-resources/fine-tuning/alpha/graders.test.ts diff --git a/.stats.yml b/.stats.yml index 
0c8278866..5f1bee851 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml -openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6 -config_hash: d9b6b6e6bc85744663e300eebc482067 +configured_endpoints: 101 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml +openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a +config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5 diff --git a/api.md b/api.md index 49e6548a8..cad696e7e 100644 --- a/api.md +++ b/api.md @@ -207,6 +207,17 @@ Methods: # FineTuning +## Methods + +Types: + +- DpoHyperparameters +- DpoMethod +- ReinforcementHyperparameters +- ReinforcementMethod +- SupervisedHyperparameters +- SupervisedMethod + ## Jobs Types: @@ -224,6 +235,8 @@ Methods: - client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage - client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob - client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage +- client.fineTuning.jobs.pause(fineTuningJobId) -> FineTuningJob +- client.fineTuning.jobs.resume(fineTuningJobId) -> FineTuningJob ### Checkpoints @@ -251,6 +264,33 @@ Methods: - client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse - client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint, permissionId) -> PermissionDeleteResponse +## Alpha + +### Graders + +Types: + +- GraderRunResponse +- GraderValidateResponse + +Methods: + +- client.fineTuning.alpha.graders.run({ ...params }) -> GraderRunResponse +- client.fineTuning.alpha.graders.validate({ ...params }) -> GraderValidateResponse + +# Graders + +## GraderModels + +Types: + +- LabelModelGrader +- MultiGrader +- PythonGrader 
+- ScoreModelGrader +- StringCheckGrader +- TextSimilarityGrader + # VectorStores Types: @@ -669,10 +709,7 @@ Methods: Types: - EvalCustomDataSourceConfig -- EvalLabelModelGrader - EvalStoredCompletionsDataSourceConfig -- EvalStringCheckGrader -- EvalTextSimilarityGrader - EvalCreateResponse - EvalRetrieveResponse - EvalUpdateResponse diff --git a/src/index.ts b/src/index.ts index 9e8d7ce37..537c18f43 100644 --- a/src/index.ts +++ b/src/index.ts @@ -71,19 +71,17 @@ import { EvalCreateResponse, EvalCustomDataSourceConfig, EvalDeleteResponse, - EvalLabelModelGrader, EvalListParams, EvalListResponse, EvalListResponsesPage, EvalRetrieveResponse, EvalStoredCompletionsDataSourceConfig, - EvalStringCheckGrader, - EvalTextSimilarityGrader, EvalUpdateParams, EvalUpdateResponse, Evals, } from './resources/evals/evals'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Graders } from './resources/graders/graders'; import { Responses } from './resources/responses/responses'; import { Upload, @@ -305,6 +303,7 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + graders: API.Graders = new API.Graders(this); vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); @@ -366,6 +365,7 @@ OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.ModelsPage = ModelsPage; OpenAI.FineTuning = FineTuning; +OpenAI.Graders = Graders; OpenAI.VectorStores = VectorStores; OpenAI.VectorStoresPage = VectorStoresPage; OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; @@ -487,6 +487,8 @@ export declare namespace OpenAI { export { FineTuning as FineTuning }; + export { Graders as Graders }; + export { VectorStores as VectorStores, type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, @@ -531,10 
+533,7 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLabelModelGrader as EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader as EvalStringCheckGrader, - type EvalTextSimilarityGrader as EvalTextSimilarityGrader, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, type EvalUpdateResponse as EvalUpdateResponse, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index caef7acc0..0f7166df4 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; +import * as GraderModelsAPI from '../graders/grader-models'; import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { @@ -103,83 +104,6 @@ export interface EvalCustomDataSourceConfig { type: 'custom'; } -/** - * A LabelModelGrader object which uses a model to assign labels to each item in - * the evaluation. - */ -export interface EvalLabelModelGrader { - input: Array; - - /** - * The labels to assign to each item in the evaluation. - */ - labels: Array; - - /** - * The model to use for the evaluation. Must support structured outputs. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The labels that indicate a passing result. Must be a subset of labels. - */ - passing_labels: Array; - - /** - * The object type, which is always `label_model`. - */ - type: 'label_model'; -} - -export namespace EvalLabelModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. 
Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A StoredCompletionsDataSourceConfig which specifies the metadata property of * your stored completions query. This is usually metadata like `usecase=chatbot` @@ -210,83 +134,6 @@ export interface EvalStoredCompletionsDataSourceConfig { metadata?: Shared.Metadata | null; } -/** - * A StringCheckGrader object that performs a string comparison between input and - * reference using a specified operation. - */ -export interface EvalStringCheckGrader { - /** - * The input text. This may include template strings. - */ - input: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. - */ - operation: 'eq' | 'ne' | 'like' | 'ilike'; - - /** - * The reference text. This may include template strings. - */ - reference: string; - - /** - * The object type, which is always `string_check`. - */ - type: 'string_check'; -} - -/** - * A TextSimilarityGrader object which grades text based on similarity metrics. 
- */ -export interface EvalTextSimilarityGrader { - /** - * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. - */ - evaluation_metric: - | 'fuzzy_match' - | 'bleu' - | 'gleu' - | 'meteor' - | 'rouge_1' - | 'rouge_2' - | 'rouge_3' - | 'rouge_4' - | 'rouge_5' - | 'rouge_l'; - - /** - * The text being graded. - */ - input: string; - - /** - * A float score where a value greater than or equal indicates a passing grade. - */ - pass_threshold: number; - - /** - * The text being graded against. - */ - reference: string; - - /** - * The type of grader. - */ - type: 'text_similarity'; - - /** - * The name of the grader. - */ - name?: string; -} - /** * An Eval object with a data source config and testing criteria. An Eval * represents a task to be done for your LLM integration. Like: @@ -335,39 +182,29 @@ export interface EvalCreateResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalCreateResponse.Python - | EvalCreateResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalCreateResponse.EvalGraderTextSimilarity + | EvalCreateResponse.EvalGraderPython + | EvalCreateResponse.EvalGraderScoreModel >; } export namespace EvalCreateResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. 
*/ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -377,85 +214,11 @@ export namespace EvalCreateResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. 
- */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -507,39 +270,29 @@ export interface EvalRetrieveResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalRetrieveResponse.Python - | EvalRetrieveResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalRetrieveResponse.EvalGraderTextSimilarity + | EvalRetrieveResponse.EvalGraderPython + | EvalRetrieveResponse.EvalGraderScoreModel >; } export namespace EvalRetrieveResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -549,85 +302,11 @@ export namespace EvalRetrieveResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. 
- */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -679,39 +358,29 @@ export interface EvalUpdateResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalUpdateResponse.Python - | EvalUpdateResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalUpdateResponse.EvalGraderTextSimilarity + | EvalUpdateResponse.EvalGraderPython + | EvalUpdateResponse.EvalGraderScoreModel >; } export namespace EvalUpdateResponse { /** - * A PythonGrader object that runs a python script on the input. 
+ * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -721,85 +390,11 @@ export namespace EvalUpdateResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. 
- */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -851,39 +446,29 @@ export interface EvalListResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalListResponse.Python - | EvalListResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalListResponse.EvalGraderTextSimilarity + | EvalListResponse.EvalGraderPython + | EvalListResponse.EvalGraderScoreModel >; } export namespace EvalListResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. 
+ */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -893,85 +478,11 @@ export namespace EvalListResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } } @@ -987,15 +498,15 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; /** * A list of graders for all eval runs in this group. */ testing_criteria: Array< | EvalCreateParams.LabelModel - | EvalStringCheckGrader - | EvalTextSimilarityGrader + | GraderModelsAPI.StringCheckGrader + | EvalCreateParams.TextSimilarity | EvalCreateParams.Python | EvalCreateParams.ScoreModel >; @@ -1048,14 +559,14 @@ export namespace EvalCreateParams { * completions query. This is usually metadata like `usecase=chatbot` or * `prompt-version=v2`, etc. */ - export interface Logs { + export interface StoredCompletions { /** - * The type of data source. Always `logs`. + * The type of data source. Always `stored_completions`. */ - type: 'logs'; + type: 'stored_completions'; /** - * Metadata filters for the logs data source. + * Metadata filters for the stored completions data source. */ metadata?: Record; } @@ -1154,29 +665,19 @@ export namespace EvalCreateParams { } /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { + export interface TextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. 
*/ @@ -1186,85 +687,11 @@ export namespace EvalCreateParams { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface ScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } } @@ -1306,10 +733,7 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLabelModelGrader as EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader as EvalStringCheckGrader, - type EvalTextSimilarityGrader as EvalTextSimilarityGrader, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, type EvalUpdateResponse as EvalUpdateResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index a246fe4e7..b2627fbf3 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,10 +4,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader, - type EvalTextSimilarityGrader, type EvalCreateResponse, type EvalRetrieveResponse, type EvalUpdateResponse, diff --git a/src/resources/fine-tuning/alpha.ts b/src/resources/fine-tuning/alpha.ts new file mode 100644 index 000000000..446b6431e --- /dev/null +++ b/src/resources/fine-tuning/alpha.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './alpha/index'; diff --git a/src/resources/fine-tuning/alpha/alpha.ts b/src/resources/fine-tuning/alpha/alpha.ts new file mode 100644 index 000000000..77d695195 --- /dev/null +++ b/src/resources/fine-tuning/alpha/alpha.ts @@ -0,0 +1,27 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as GradersAPI from './graders'; +import { + GraderRunParams, + GraderRunResponse, + GraderValidateParams, + GraderValidateResponse, + Graders, +} from './graders'; + +export class Alpha extends APIResource { + graders: GradersAPI.Graders = new GradersAPI.Graders(this._client); +} + +Alpha.Graders = Graders; + +export declare namespace Alpha { + export { + Graders as Graders, + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/graders.ts b/src/resources/fine-tuning/alpha/graders.ts new file mode 100644 index 000000000..a9ef57f71 --- /dev/null +++ b/src/resources/fine-tuning/alpha/graders.ts @@ -0,0 +1,168 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; +import * as GraderModelsAPI from '../../graders/grader-models'; + +export class Graders extends APIResource { + /** + * Run a grader. + * + * @example + * ```ts + * const response = await client.fineTuning.alpha.graders.run({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * model_sample: 'model_sample', + * reference_answer: 'string', + * }); + * ``` + */ + run(body: GraderRunParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/fine_tuning/alpha/graders/run', { body, ...options }); + } + + /** + * Validate a grader. 
+ * + * @example + * ```ts + * const response = + * await client.fineTuning.alpha.graders.validate({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * }); + * ``` + */ + validate( + body: GraderValidateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/fine_tuning/alpha/graders/validate', { body, ...options }); + } +} + +export interface GraderRunResponse { + metadata: GraderRunResponse.Metadata; + + model_grader_token_usage_per_model: Record; + + reward: number; + + sub_rewards: Record; +} + +export namespace GraderRunResponse { + export interface Metadata { + errors: Metadata.Errors; + + execution_time: number; + + name: string; + + sampled_model_name: string | null; + + scores: Record; + + token_usage: number | null; + + type: string; + } + + export namespace Metadata { + export interface Errors { + formula_parse_error: boolean; + + invalid_variable_error: boolean; + + model_grader_parse_error: boolean; + + model_grader_refusal_error: boolean; + + model_grader_server_error: boolean; + + model_grader_server_error_details: string | null; + + other_error: boolean; + + python_grader_runtime_error: boolean; + + python_grader_runtime_error_details: string | null; + + python_grader_server_error: boolean; + + python_grader_server_error_type: string | null; + + sample_parse_error: boolean; + + truncated_observation_error: boolean; + + unresponsive_reward_error: boolean; + } + } +} + +export interface GraderValidateResponse { + /** + * The grader used for the fine-tuning job. + */ + grader?: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export interface GraderRunParams { + /** + * The grader used for the fine-tuning job. 
+ */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The model sample to be evaluated. + */ + model_sample: string; + + /** + * The reference answer for the evaluation. + */ + reference_answer: string | unknown | Array | number; +} + +export interface GraderValidateParams { + /** + * The grader used for the fine-tuning job. + */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export declare namespace Graders { + export { + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/index.ts b/src/resources/fine-tuning/alpha/index.ts new file mode 100644 index 000000000..47b229bc3 --- /dev/null +++ b/src/resources/fine-tuning/alpha/index.ts @@ -0,0 +1,10 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Alpha } from './alpha'; +export { + Graders, + type GraderRunResponse, + type GraderValidateResponse, + type GraderRunParams, + type GraderValidateParams, +} from './graders'; diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index 9b0a01992..8fb54983b 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,18 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as MethodsAPI from './methods'; +import { + DpoHyperparameters, + DpoMethod, + Methods, + ReinforcementHyperparameters, + ReinforcementMethod, + SupervisedHyperparameters, + SupervisedMethod, +} from './methods'; +import * as AlphaAPI from './alpha/alpha'; +import { Alpha } from './alpha/alpha'; import * as CheckpointsAPI from './checkpoints/checkpoints'; import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; @@ -19,16 +31,30 @@ import { } from './jobs/jobs'; export class FineTuning extends APIResource { + methods: MethodsAPI.Methods = new MethodsAPI.Methods(this._client); jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); + alpha: AlphaAPI.Alpha = new AlphaAPI.Alpha(this._client); } +FineTuning.Methods = Methods; FineTuning.Jobs = Jobs; FineTuning.FineTuningJobsPage = FineTuningJobsPage; FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; FineTuning.Checkpoints = Checkpoints; +FineTuning.Alpha = Alpha; export declare namespace FineTuning { + export { + Methods as Methods, + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; + export { Jobs as Jobs, type FineTuningJob as FineTuningJob, @@ -44,4 +70,6 @@ export declare namespace FineTuning { }; export { Checkpoints as Checkpoints }; + + export { Alpha as Alpha }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index d23161c62..878ac402d 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,5 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Alpha } from './alpha/index'; export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { @@ -15,3 +16,12 @@ export { type JobListParams, type JobListEventsParams, } from './jobs/index'; +export { + Methods, + type DpoHyperparameters, + type DpoMethod, + type ReinforcementHyperparameters, + type ReinforcementMethod, + type SupervisedHyperparameters, + type SupervisedMethod, +} from './methods'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 0bc812917..08616cd4f 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as MethodsAPI from '../methods'; import * as CheckpointsAPI from './checkpoints'; import { CheckpointListParams, @@ -127,6 +128,34 @@ export class Jobs extends APIResource { ...options, }); } + + /** + * Pause a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.pause( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + pause(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/pause`, options); + } + + /** + * Resume a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.resume( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + resume(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/resume`, options); + } } export class FineTuningJobsPage extends CursorPage {} @@ -293,97 +322,24 @@ export namespace FineTuningJob { */ export interface Method { /** - * Configuration for the DPO fine-tuning method. 
- */ - dpo?: Method.Dpo; - - /** - * Configuration for the supervised fine-tuning method. + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. */ - supervised?: Method.Supervised; + type: 'supervised' | 'dpo' | 'reinforcement'; /** - * The type of method. Is either `supervised` or `dpo`. + * Configuration for the DPO fine-tuning method. */ - type?: 'supervised' | 'dpo'; - } + dpo?: MethodsAPI.DpoMethod; - export namespace Method { /** - * Configuration for the DPO fine-tuning method. + * Configuration for the reinforcement fine-tuning method. */ - export interface Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Dpo.Hyperparameters; - } - - export namespace Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * The beta value for the DPO method. A higher beta value will increase the weight - * of the penalty between the policy and reference model. - */ - beta?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + reinforcement?: MethodsAPI.ReinforcementMethod; /** * Configuration for the supervised fine-tuning method. */ - export interface Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Supervised.Hyperparameters; - } - - export namespace Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. 
A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + supervised?: MethodsAPI.SupervisedMethod; } } @@ -637,97 +593,24 @@ export namespace JobCreateParams { */ export interface Method { /** - * Configuration for the DPO fine-tuning method. - */ - dpo?: Method.Dpo; - - /** - * Configuration for the supervised fine-tuning method. + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. */ - supervised?: Method.Supervised; + type: 'supervised' | 'dpo' | 'reinforcement'; /** - * The type of method. Is either `supervised` or `dpo`. + * Configuration for the DPO fine-tuning method. */ - type?: 'supervised' | 'dpo'; - } + dpo?: MethodsAPI.DpoMethod; - export namespace Method { /** - * Configuration for the DPO fine-tuning method. + * Configuration for the reinforcement fine-tuning method. */ - export interface Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Dpo.Hyperparameters; - } - - export namespace Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * The beta value for the DPO method. A higher beta value will increase the weight - * of the penalty between the policy and reference model. - */ - beta?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. 
- */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + reinforcement?: MethodsAPI.ReinforcementMethod; /** * Configuration for the supervised fine-tuning method. */ - export interface Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Supervised.Hyperparameters; - } - - export namespace Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + supervised?: MethodsAPI.SupervisedMethod; } } diff --git a/src/resources/fine-tuning/methods.ts b/src/resources/fine-tuning/methods.ts new file mode 100644 index 000000000..aa459c74c --- /dev/null +++ b/src/resources/fine-tuning/methods.ts @@ -0,0 +1,152 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from '../graders/grader-models'; + +export class Methods extends APIResource {} + +/** + * The hyperparameters used for the DPO fine-tuning job. + */ +export interface DpoHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. 
A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the DPO fine-tuning method. + */ +export interface DpoMethod { + /** + * The hyperparameters used for the DPO fine-tuning job. + */ + hyperparameters?: DpoHyperparameters; +} + +/** + * The hyperparameters used for the reinforcement fine-tuning job. + */ +export interface ReinforcementHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Multiplier on amount of compute used for exploring search space during training. + */ + compute_multiplier?: 'auto' | number; + + /** + * The number of training steps between evaluation runs. + */ + eval_interval?: 'auto' | number; + + /** + * Number of evaluation samples to generate per training step. + */ + eval_samples?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + + /** + * Level of reasoning effort. + */ + reasoning_effort?: 'default' | 'low' | 'medium' | 'high'; +} + +/** + * Configuration for the reinforcement fine-tuning method. + */ +export interface ReinforcementMethod { + /** + * The grader used for the fine-tuning job. 
+ */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The hyperparameters used for the reinforcement fine-tuning job. + */ + hyperparameters?: ReinforcementHyperparameters; +} + +/** + * The hyperparameters used for the fine-tuning job. + */ +export interface SupervisedHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the supervised fine-tuning method. + */ +export interface SupervisedMethod { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: SupervisedHyperparameters; +} + +export declare namespace Methods { + export { + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; +} diff --git a/src/resources/graders.ts b/src/resources/graders.ts new file mode 100644 index 000000000..2ea9aa959 --- /dev/null +++ b/src/resources/graders.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './graders/index'; diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts new file mode 100644 index 000000000..9ee08f75f --- /dev/null +++ b/src/resources/graders/grader-models.ts @@ -0,0 +1,296 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as ResponsesAPI from '../responses/responses'; + +export class GraderModels extends APIResource {} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface LabelModelGrader { + input: Array; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; +} + +export namespace LabelModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. 
+ */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A MultiGrader object combines the output of multiple graders to produce a single + * score. + */ +export interface MultiGrader { + /** + * A formula to calculate the output based on grader results. + */ + calculate_output: string; + + graders: Record< + string, + StringCheckGrader | TextSimilarityGrader | PythonGrader | ScoreModelGrader | LabelModelGrader + >; + + /** + * The name of the grader. + */ + name: string; + + /** + * The type of grader. + */ + type: 'multi'; +} + +/** + * A PythonGrader object that runs a python script on the input. + */ +export interface PythonGrader { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; +} + +/** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ +export interface ScoreModelGrader { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; +} + +export namespace ScoreModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface StringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface TextSimilarityGrader { + /** + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l'; + + /** + * The text being graded. 
+ */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. + */ + type: 'text_similarity'; +} + +export declare namespace GraderModels { + export { + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/graders.ts b/src/resources/graders/graders.ts new file mode 100644 index 000000000..de3297450 --- /dev/null +++ b/src/resources/graders/graders.ts @@ -0,0 +1,31 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from './grader-models'; +import { + GraderModels, + LabelModelGrader, + MultiGrader, + PythonGrader, + ScoreModelGrader, + StringCheckGrader, + TextSimilarityGrader, +} from './grader-models'; + +export class Graders extends APIResource { + graderModels: GraderModelsAPI.GraderModels = new GraderModelsAPI.GraderModels(this._client); +} + +Graders.GraderModels = GraderModels; + +export declare namespace Graders { + export { + GraderModels as GraderModels, + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/index.ts b/src/resources/graders/index.ts new file mode 100644 index 000000000..82d557a6a --- /dev/null +++ b/src/resources/graders/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + GraderModels, + type LabelModelGrader, + type MultiGrader, + type PythonGrader, + type ScoreModelGrader, + type StringCheckGrader, + type TextSimilarityGrader, +} from './grader-models'; +export { Graders } from './graders'; diff --git a/src/resources/index.ts b/src/resources/index.ts index 0d8ec9220..9d827615c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,10 +33,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader, - type EvalTextSimilarityGrader, type EvalCreateResponse, type EvalRetrieveResponse, type EvalUpdateResponse, @@ -57,6 +54,7 @@ export { type FileListParams, } from './files'; export { FineTuning } from './fine-tuning/fine-tuning'; +export { Graders } from './graders/graders'; export { Images, type Image, diff --git a/tests/api-resources/fine-tuning/alpha/graders.test.ts b/tests/api-resources/fine-tuning/alpha/graders.test.ts new file mode 100644 index 000000000..8e47a4c42 --- /dev/null +++ b/tests/api-resources/fine-tuning/alpha/graders.test.ts @@ -0,0 +1,53 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010',
+});
+
+describe('resource graders', () => {
+  test('run: only required params', async () => {
+    const responsePromise = client.fineTuning.alpha.graders.run({
+      grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' },
+      model_sample: 'model_sample',
+      reference_answer: 'string',
+    });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('run: required and optional params', async () => {
+    const response = await client.fineTuning.alpha.graders.run({
+      grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' },
+      model_sample: 'model_sample',
+      reference_answer: 'string',
+    });
+  });
+
+  test('validate: only required params', async () => {
+    const responsePromise = client.fineTuning.alpha.graders.validate({
+      grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' },
+    });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('validate: required and optional params', async () => {
+    const response = await client.fineTuning.alpha.graders.validate({
+      grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' },
+    });
+  });
+});
diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts
b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 4de83a8b7..fe8c9efee 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -35,6 +35,7 @@ describe('resource jobs', () => { }, ], method: { + type: 'supervised', dpo: { hyperparameters: { batch_size: 'auto', @@ -43,10 +44,27 @@ describe('resource jobs', () => { n_epochs: 'auto', }, }, + reinforcement: { + grader: { + input: 'input', + name: 'name', + operation: 'eq', + reference: 'reference', + type: 'string_check', + }, + hyperparameters: { + batch_size: 'auto', + compute_multiplier: 'auto', + eval_interval: 'auto', + eval_samples: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', + reasoning_effort: 'default', + }, + }, supervised: { hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, }, - type: 'supervised', }, seed: 42, suffix: 'x', @@ -143,4 +161,40 @@ describe('resource jobs', () => { ), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('pause', async () => { + const responsePromise = client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('pause: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('resume', async () => { + const responsePromise = 
client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('resume: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); }); From bc9f15fc7d1f4acf625adc3603577b06d59cdc5c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 May 2025 17:24:55 +0000 Subject: [PATCH 239/246] release: 4.98.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4e19f03d6..a279d9124 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.97.0" + ".": "4.98.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fa637742..2f1a39177 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.98.0 (2025-05-08) + +Full Changelog: [v4.97.0...v4.98.0](https://github.com/openai/openai-node/compare/v4.97.0...v4.98.0) + +### Features + +* **api:** Add reinforcement fine-tuning api support ([4aa7a79](https://github.com/openai/openai-node/commit/4aa7a7954c63caa26cc1640ace56093fe1cafa04)) + + +### Chores + +* **ci:** bump node version for release workflows 
([2961f63](https://github.com/openai/openai-node/commit/2961f63c4d5b8ae8efdf8ea6581aa83c6b0f722e)) +* **internal:** fix formatting ([91a44fe](https://github.com/openai/openai-node/commit/91a44fe11c0847dc50d48a03a8d409ac4bece37a)) + + +### Documentation + +* add examples to tsdocs ([7d841b7](https://github.com/openai/openai-node/commit/7d841b7f98eb542a398fb9de12056125e8d6cb22)) + ## 4.97.0 (2025-05-02) Full Changelog: [v4.96.2...v4.97.0](https://github.com/openai/openai-node/compare/v4.96.2...v4.97.0) diff --git a/jsr.json b/jsr.json index fd3ca4a41..25bbc9ac2 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.97.0", + "version": "4.98.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 8b9281b35..d34efceb0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.97.0", + "version": "4.98.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 97cbc5900..f64cc03ff 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.97.0'; // x-release-please-version +export const VERSION = '4.98.0'; // x-release-please-version From ea1d56c979ad7136aa584a773904b0570ba14783 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 21:29:14 +0000 Subject: [PATCH 240/246] feat(api): responses x eval api --- .stats.yml | 6 +- api.md | 2 + src/index.ts | 2 + src/resources/audio/transcriptions.ts | 34 + src/resources/embeddings.ts | 7 +- src/resources/evals/evals.ts | 89 +- src/resources/evals/index.ts | 2 + src/resources/evals/runs/index.ts | 1 + src/resources/evals/runs/runs.ts | 1444 +++-------------- src/resources/fine-tuning/jobs/jobs.ts | 2 +- src/resources/index.ts | 1 + .../audio/transcriptions.test.ts | 1 + 
12 files changed, 375 insertions(+), 1216 deletions(-) diff --git a/.stats.yml b/.stats.yml index 5f1bee851..11ba2b010 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml -openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a -config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml +openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 +config_hash: 7da27f7260075e8813ddcea542fba1bf diff --git a/api.md b/api.md index cad696e7e..665dfaeed 100644 --- a/api.md +++ b/api.md @@ -709,6 +709,7 @@ Methods: Types: - EvalCustomDataSourceConfig +- EvalLogsDataSourceConfig - EvalStoredCompletionsDataSourceConfig - EvalCreateResponse - EvalRetrieveResponse @@ -730,6 +731,7 @@ Types: - CreateEvalCompletionsRunDataSource - CreateEvalJSONLRunDataSource +- CreateEvalResponsesRunDataSource - EvalAPIError - RunCreateResponse - RunRetrieveResponse diff --git a/src/index.ts b/src/index.ts index 537c18f43..b51da51c5 100644 --- a/src/index.ts +++ b/src/index.ts @@ -74,6 +74,7 @@ import { EvalListParams, EvalListResponse, EvalListResponsesPage, + EvalLogsDataSourceConfig, EvalRetrieveResponse, EvalStoredCompletionsDataSourceConfig, EvalUpdateParams, @@ -533,6 +534,7 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 8d563e0ba..9e5310874 100644 
--- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -324,6 +324,14 @@ export interface TranscriptionCreateParamsBase< */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Controls how the audio is cut into chunks. When set to `"auto"`, the server + * first normalizes loudness and then uses voice activity detection (VAD) to choose + * boundaries. `server_vad` object can be provided to tweak VAD detection + * parameters manually. If unset, the audio is transcribed as a single block. + */ + chunking_strategy?: 'auto' | TranscriptionCreateParams.VadConfig | null; + /** * Additional information to include in the transcription response. `logprobs` will * return the log probabilities of the tokens in the response to understand the @@ -387,6 +395,32 @@ export interface TranscriptionCreateParamsBase< } export namespace TranscriptionCreateParams { + export interface VadConfig { + /** + * Must be set to `server_vad` to enable manual chunking using server side VAD. + */ + type: 'server_vad'; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). With shorter values + * the model will respond more quickly, but may jump in on short pauses from the + * user. + */ + silence_duration_ms?: number; + + /** + * Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + } + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index cc040abff..fb02a7654 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -130,11 +130,12 @@ export interface EmbeddingCreateParams { * Input text to embed, encoded as a string or array of tokens. To embed multiple * inputs in a single request, pass an array of strings or array of token arrays. * The input must not exceed the max input tokens for the model (8192 tokens for - * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + * all embedding models), cannot be an empty string, and any array must be 2048 * dimensions or less. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. Some models may also impose a limit on total number of - * tokens summed across inputs. + * for counting tokens. In addition to the per-input token limit, all embedding + * models enforce a maximum of 300,000 tokens summed across all inputs in a single + * request. */ input: string | Array | Array | Array>; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 0f7166df4..396747af2 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -10,6 +10,7 @@ import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, + CreateEvalResponsesRunDataSource, EvalAPIError, RunCancelResponse, RunCreateParams, @@ -105,11 +106,37 @@ export interface EvalCustomDataSourceConfig { } /** - * A StoredCompletionsDataSourceConfig which specifies the metadata property of - * your stored completions query. 
This is usually metadata like `usecase=chatbot` - * or `prompt-version=v2`, etc. The schema returned by this data source config is - * used to defined what variables are available in your evals. `item` and `sample` - * are both defined when using this data source config. + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ +export interface EvalLogsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * @deprecated Deprecated in favor of LogsDataSourceConfig. */ export interface EvalStoredCompletionsDataSourceConfig { /** @@ -119,9 +146,9 @@ export interface EvalStoredCompletionsDataSourceConfig { schema: Record; /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `stored-completions`. */ - type: 'stored_completions'; + type: 'stored-completions'; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -156,7 +183,10 @@ export interface EvalCreateResponse { /** * Configuration of data sources used in runs of the evaluation. 
*/ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -244,7 +274,10 @@ export interface EvalRetrieveResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -332,7 +365,10 @@ export interface EvalUpdateResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -420,7 +456,10 @@ export interface EvalListResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -498,7 +537,7 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions; /** * A list of graders for all eval runs in this group. 
@@ -555,15 +594,29 @@ export namespace EvalCreateParams { } /** - * A data source config which specifies the metadata property of your stored - * completions query. This is usually metadata like `usecase=chatbot` or - * `prompt-version=v2`, etc. + * A data source config which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + */ + export interface Logs { + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Metadata filters for the logs data source. + */ + metadata?: Record; + } + + /** + * Deprecated in favor of LogsDataSourceConfig. */ export interface StoredCompletions { /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `stored-completions`. */ - type: 'stored_completions'; + type: 'stored-completions'; /** * Metadata filters for the stored completions data source. @@ -733,6 +786,7 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -749,6 +803,7 @@ export declare namespace Evals { Runs as Runs, type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index b2627fbf3..856a4088a 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,6 +4,7 @@ export { 
EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -19,6 +20,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource, type EvalAPIError, type RunCreateResponse, type RunRetrieveResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index d0e18bff4..2e5d1a884 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -12,6 +12,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource, type EvalAPIError, type RunCreateResponse, type RunRetrieveResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 50c07a514..9aec3a1c6 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -321,6 +321,242 @@ export namespace CreateEvalJSONLRunDataSource { } } +/** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ +export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; +} + +export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. 
+ */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. 
This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. 
+ */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } +} + /** * An object representing an error response from the Eval API. */ @@ -356,7 +592,7 @@ export interface RunCreateResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCreateResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -419,241 +655,7 @@ export interface RunCreateResponse { status: string; } -export namespace RunCreateResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. 
- */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. 
- */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. 
- */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - +export namespace RunCreateResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -749,7 +751,7 @@ export interface RunRetrieveResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunRetrieveResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -813,240 +815,6 @@ export interface RunRetrieveResponse { } export namespace RunRetrieveResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1142,7 +910,7 @@ export interface RunListResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunListResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -1196,250 +964,16 @@ export interface RunListResponse { /** * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunListResponse.ResultCounts; - - /** - * The status of the evaluation run. - */ - status: string; -} - -export namespace RunListResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} +export namespace RunListResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1543,7 +1077,7 @@ export interface RunCancelResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCancelResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -1607,240 +1141,6 @@ export interface RunCancelResponse { } export namespace RunCancelResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1923,7 +1223,7 @@ export interface RunCreateParams { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCreateParams.CreateEvalResponsesRunDataSource; + | CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1941,247 +1241,6 @@ export interface RunCreateParams { name?: string; } -export namespace RunCreateParams { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: - | CreateEvalResponsesRunDataSource.FileContent - | CreateEvalResponsesRunDataSource.FileID - | CreateEvalResponsesRunDataSource.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: - | CreateEvalResponsesRunDataSource.Template - | CreateEvalResponsesRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; - } - - export namespace CreateEvalResponsesRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. 
- */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. 
- */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. 
- */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } -} - export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for @@ -2204,6 +1263,7 @@ export declare namespace Runs { export { type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 08616cd4f..cc5f55e9a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -302,7 +302,7 @@ export namespace FineTuningJob { * Number of examples in each batch. A larger batch size means that model * parameters are updated less frequently, but with lower variance. */ - batch_size?: 'auto' | number; + batch_size?: unknown | 'auto' | number | null; /** * Scaling factor for the learning rate. 
A smaller learning rate may be useful to diff --git a/src/resources/index.ts b/src/resources/index.ts index 9d827615c..74d585595 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,6 +33,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 2297677b4..ad76808d0 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -27,6 +27,7 @@ describe('resource transcriptions', () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'gpt-4o-transcribe', + chunking_strategy: 'auto', include: ['logprobs'], language: 'language', prompt: 'prompt', From e83286b10b20d3e4c02903739b045af5cbf71cde Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 23:47:30 +0000 Subject: [PATCH 241/246] feat(api): manual updates --- .stats.yml | 2 +- api.md | 5 + src/index.ts | 3 + src/resources/beta/beta.ts | 2 + src/resources/beta/index.ts | 1 + src/resources/beta/threads/index.ts | 1 + src/resources/beta/threads/runs/runs.ts | 44 +--- src/resources/beta/threads/threads.ts | 43 ++-- src/resources/evals/evals.ts | 49 +--- src/resources/evals/index.ts | 2 + src/resources/evals/runs/index.ts | 2 + src/resources/evals/runs/runs.ts | 233 ++++--------------- src/resources/graders/grader-models.ts | 94 +------- src/resources/index.ts | 1 + src/resources/shared.ts | 44 ++++ src/resources/vector-stores/index.ts | 1 + src/resources/vector-stores/vector-stores.ts | 75 ++---- 17 files changed, 155 insertions(+), 447 deletions(-) diff --git a/.stats.yml b/.stats.yml index 11ba2b010..202b915dc 100644 
--- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: 7da27f7260075e8813ddcea542fba1bf +config_hash: bdacc55eb995c15255ec82130eb8c3bb diff --git a/api.md b/api.md index 665dfaeed..c4170366f 100644 --- a/api.md +++ b/api.md @@ -7,6 +7,7 @@ Types: - ComparisonFilter - CompoundFilter - ErrorObject +- EvalItem - FunctionDefinition - FunctionParameters - Metadata @@ -304,6 +305,7 @@ Types: - StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted +- VectorStoreExpirationAfter - VectorStoreSearchResponse Methods: @@ -463,6 +465,7 @@ Types: - AssistantToolChoiceOption - Thread - ThreadDeleted +- TruncationObject Methods: @@ -733,6 +736,8 @@ Types: - CreateEvalJSONLRunDataSource - CreateEvalResponsesRunDataSource - EvalAPIError +- EvalJSONLFileContentSource +- EvalJSONLFileIDSource - RunCreateResponse - RunRetrieveResponse - RunListResponse diff --git a/src/index.ts b/src/index.ts index b51da51c5..c1612964a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -101,6 +101,7 @@ import { VectorStore, VectorStoreCreateParams, VectorStoreDeleted, + VectorStoreExpirationAfter, VectorStoreListParams, VectorStoreSearchParams, VectorStoreSearchResponse, @@ -501,6 +502,7 @@ export declare namespace OpenAI { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, @@ -552,6 +554,7 @@ export declare namespace OpenAI { export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter 
= API.CompoundFilter; export type ErrorObject = API.ErrorObject; + export type EvalItem = API.EvalItem; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 6282d4593..c32159776 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -90,6 +90,7 @@ import { ThreadDeleted, ThreadUpdateParams, Threads, + TruncationObject, } from './threads/threads'; import { Chat } from './chat/chat'; @@ -188,6 +189,7 @@ export declare namespace Beta { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, + type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index b9cef17cb..296fdba75 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -29,6 +29,7 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, + type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index f67a1edde..89d00dcf6 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -63,6 +63,7 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, + type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 25356df3c..608ef6481 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -564,7 +564,7 @@ export interface Run { * Controls 
for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy: Run.TruncationStrategy | null; + truncation_strategy: ThreadsAPI.TruncationObject | null; /** * Usage statistics related to the run. This value will be `null` if the run is not @@ -639,26 +639,6 @@ export namespace Run { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -862,7 +842,7 @@ export interface RunCreateParamsBase { * Body param: Controls for how a thread will be truncated prior to the run. Use * this to control the intial context window of the run. */ - truncation_strategy?: RunCreateParams.TruncationStrategy | null; + truncation_strategy?: ThreadsAPI.TruncationObject | null; } export namespace RunCreateParams { @@ -921,26 +901,6 @@ export namespace RunCreateParams { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. 
When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index c0c6bc8e4..8b0332fb8 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -346,6 +346,26 @@ export interface ThreadDeleted { object: 'thread.deleted'; } +/** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ +export interface TruncationObject { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; +} + export interface ThreadCreateParams { /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -734,7 +754,7 @@ export interface ThreadCreateAndRunParamsBase { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. 
*/ - truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; + truncation_strategy?: TruncationObject | null; } export namespace ThreadCreateAndRunParams { @@ -965,26 +985,6 @@ export namespace ThreadCreateAndRunParams { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; } @@ -1684,6 +1684,7 @@ export declare namespace Threads { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, + type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 396747af2..5370c51b9 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -5,13 +5,14 @@ import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; import * as GraderModelsAPI from '../graders/grader-models'; -import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { 
CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, CreateEvalResponsesRunDataSource, EvalAPIError, + EvalJSONLFileContentSource, + EvalJSONLFileIDSource, RunCancelResponse, RunCreateParams, RunCreateResponse, @@ -633,7 +634,7 @@ export namespace EvalCreateParams { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - input: Array; + input: Array; /** * The labels to classify to each item in the evaluation. @@ -673,48 +674,6 @@ export namespace EvalCreateParams { */ role: string; } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } /** @@ -805,6 +764,8 @@ export declare namespace Evals { type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, + type EvalJSONLFileContentSource as EvalJSONLFileContentSource, + type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index 856a4088a..084fc9ad6 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -22,6 +22,8 @@ export { type CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource, type EvalAPIError, + type EvalJSONLFileContentSource, + type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index 2e5d1a884..8e13e67df 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -14,6 +14,8 @@ export { type CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource, type EvalAPIError, + type EvalJSONLFileContentSource, + type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 9aec3a1c6..dec4dcb51 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -88,8 +88,8 @@ export interface CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ source: - | CreateEvalCompletionsRunDataSource.FileContent - | CreateEvalCompletionsRunDataSource.FileID + | EvalJSONLFileContentSource + | EvalJSONLFileIDSource | CreateEvalCompletionsRunDataSource.StoredCompletions; /** @@ -110,38 +110,6 
@@ export interface CreateEvalCompletionsRunDataSource { } export namespace CreateEvalCompletionsRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -187,7 +155,7 @@ export namespace CreateEvalCompletionsRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -195,50 +163,6 @@ export namespace CreateEvalCompletionsRunDataSource { type: 'template'; } - export namespace Template { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Message { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Message.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Message { - /** - * A text output from the model. 
- */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - export interface ItemReference { /** * A reference to a variable in the "item" namespace. Ie, "item.name" @@ -279,7 +203,7 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { - source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; + source: EvalJSONLFileContentSource | EvalJSONLFileIDSource; /** * The type of data source. Always `jsonl`. @@ -287,40 +211,6 @@ export interface CreateEvalJSONLRunDataSource { type: 'jsonl'; } -export namespace CreateEvalJSONLRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } -} - /** * A ResponsesRunDataSource object describing a model sampling configuration. */ @@ -328,10 +218,7 @@ export interface CreateEvalResponsesRunDataSource { /** * A EvalResponsesSource object describing a run data source configuration. */ - source: - | CreateEvalResponsesRunDataSource.FileContent - | CreateEvalResponsesRunDataSource.FileID - | CreateEvalResponsesRunDataSource.Responses; + source: EvalJSONLFileContentSource | EvalJSONLFileIDSource | CreateEvalResponsesRunDataSource.Responses; /** * The type of run data source. Always `responses`. 
@@ -349,38 +236,6 @@ export interface CreateEvalResponsesRunDataSource { } export namespace CreateEvalResponsesRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - /** * A EvalResponsesSource object describing a run data source configuration. */ @@ -458,7 +313,7 @@ export namespace CreateEvalResponsesRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -478,48 +333,6 @@ export namespace CreateEvalResponsesRunDataSource { */ role: string; } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. 
- */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } export interface ItemReference { @@ -572,6 +385,38 @@ export interface EvalAPIError { message: string; } +export interface EvalJSONLFileContentSource { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; +} + +export namespace EvalJSONLFileContentSource { + export interface Content { + item: Record; + + sample?: Record; + } +} + +export interface EvalJSONLFileIDSource { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; +} + /** * A schema representing an evaluation run. */ @@ -1265,6 +1110,8 @@ export declare namespace Runs { type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, + type EvalJSONLFileContentSource as EvalJSONLFileContentSource, + type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts index 9ee08f75f..8a1a0eddd 100644 --- a/src/resources/graders/grader-models.ts +++ b/src/resources/graders/grader-models.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as ResponsesAPI from '../responses/responses'; +import * as Shared from '../shared'; export class GraderModels extends APIResource {} @@ -10,7 +10,7 @@ export class GraderModels extends APIResource {} * the evaluation. */ export interface LabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. 
@@ -38,50 +38,6 @@ export interface LabelModelGrader { type: 'label_model'; } -export namespace LabelModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A MultiGrader object combines the output of multiple graders to produce a single * score. @@ -140,7 +96,7 @@ export interface ScoreModelGrader { /** * The input text. This may include template strings. */ - input: Array; + input: Array; /** * The model to use for the evaluation. @@ -168,50 +124,6 @@ export interface ScoreModelGrader { sampling_params?: unknown; } -export namespace ScoreModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. 
- */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A StringCheckGrader object that performs a string comparison between input and * reference using a specified operation. diff --git a/src/resources/index.ts b/src/resources/index.ts index 74d585595..0f21e596c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -91,6 +91,7 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 1c0006b18..d0c2aaa49 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import * as ResponsesAPI from './responses/responses'; + export type AllModels = | (string & {}) | ChatModel @@ -118,6 +120,48 @@ export interface ErrorObject { type: string; } +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. 
+ */ +export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; +} + +export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } +} + export interface FunctionDefinition { /** * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 9cbcbc0b2..c3c042387 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -30,6 +30,7 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 7d61e7fd6..66438be02 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -249,7 +249,7 @@ export interface VectorStore { /** * The expiration policy for a vector store. */ - expires_after?: VectorStore.ExpiresAfter; + expires_after?: VectorStoreExpirationAfter; /** * The Unix timestamp (in seconds) for when the vector store will expire. @@ -284,22 +284,6 @@ export namespace VectorStore { */ total: number; } - - /** - * The expiration policy for a vector store. 
- */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } } export interface VectorStoreDeleted { @@ -310,6 +294,22 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +/** + * The expiration policy for a vector store. + */ +export interface VectorStoreExpirationAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; +} + export interface VectorStoreSearchResponse { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -365,7 +365,7 @@ export interface VectorStoreCreateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreCreateParams.ExpiresAfter; + expires_after?: VectorStoreExpirationAfter; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -390,29 +390,11 @@ export interface VectorStoreCreateParams { name?: string; } -export namespace VectorStoreCreateParams { - /** - * The expiration policy for a vector store. - */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } -} - export interface VectorStoreUpdateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreUpdateParams.ExpiresAfter | null; + expires_after?: VectorStoreExpirationAfter | null; /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -430,24 +412,6 @@ export interface VectorStoreUpdateParams { name?: string | null; } -export namespace VectorStoreUpdateParams { - /** - * The expiration policy for a vector store. - */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } -} - export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -521,6 +485,7 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, From 8cc63d351057678d474fe1a16e3077370c83fddb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 17:11:15 +0000 Subject: [PATCH 242/246] feat(api): Updating Assistants and Evals API schemas --- .stats.yml | 6 +- api.md | 7 - src/index.ts | 5 - src/resources/beta/beta.ts | 2 - src/resources/beta/index.ts | 1 - src/resources/beta/threads/index.ts | 1 - src/resources/beta/threads/runs/runs.ts | 44 +- src/resources/beta/threads/threads.ts | 43 +- src/resources/evals/evals.ts | 210 ++- src/resources/evals/index.ts | 4 - src/resources/evals/runs/index.ts | 3 - src/resources/evals/runs/runs.ts | 1511 +++++++++++++++--- src/resources/graders/grader-models.ts | 96 +- src/resources/index.ts | 2 - src/resources/shared.ts | 45 +- src/resources/vector-stores/index.ts | 1 - 
src/resources/vector-stores/vector-stores.ts | 75 +- 17 files changed, 1680 insertions(+), 376 deletions(-) diff --git a/.stats.yml b/.stats.yml index 202b915dc..a3c5d081d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml -openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: bdacc55eb995c15255ec82130eb8c3bb +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml +openapi_spec_hash: da3e669f65130043b1170048c0727890 +config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/api.md b/api.md index c4170366f..cad696e7e 100644 --- a/api.md +++ b/api.md @@ -7,7 +7,6 @@ Types: - ComparisonFilter - CompoundFilter - ErrorObject -- EvalItem - FunctionDefinition - FunctionParameters - Metadata @@ -305,7 +304,6 @@ Types: - StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted -- VectorStoreExpirationAfter - VectorStoreSearchResponse Methods: @@ -465,7 +463,6 @@ Types: - AssistantToolChoiceOption - Thread - ThreadDeleted -- TruncationObject Methods: @@ -712,7 +709,6 @@ Methods: Types: - EvalCustomDataSourceConfig -- EvalLogsDataSourceConfig - EvalStoredCompletionsDataSourceConfig - EvalCreateResponse - EvalRetrieveResponse @@ -734,10 +730,7 @@ Types: - CreateEvalCompletionsRunDataSource - CreateEvalJSONLRunDataSource -- CreateEvalResponsesRunDataSource - EvalAPIError -- EvalJSONLFileContentSource -- EvalJSONLFileIDSource - RunCreateResponse - RunRetrieveResponse - RunListResponse diff --git a/src/index.ts b/src/index.ts index c1612964a..537c18f43 100644 --- a/src/index.ts +++ b/src/index.ts @@ -74,7 +74,6 @@ import { EvalListParams, EvalListResponse, EvalListResponsesPage, - EvalLogsDataSourceConfig, EvalRetrieveResponse, 
EvalStoredCompletionsDataSourceConfig, EvalUpdateParams, @@ -101,7 +100,6 @@ import { VectorStore, VectorStoreCreateParams, VectorStoreDeleted, - VectorStoreExpirationAfter, VectorStoreListParams, VectorStoreSearchParams, VectorStoreSearchResponse, @@ -502,7 +500,6 @@ export declare namespace OpenAI { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, - type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, @@ -536,7 +533,6 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -554,7 +550,6 @@ export declare namespace OpenAI { export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter = API.CompoundFilter; export type ErrorObject = API.ErrorObject; - export type EvalItem = API.EvalItem; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index c32159776..6282d4593 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -90,7 +90,6 @@ import { ThreadDeleted, ThreadUpdateParams, Threads, - TruncationObject, } from './threads/threads'; import { Chat } from './chat/chat'; @@ -189,7 +188,6 @@ export declare namespace Beta { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, - type TruncationObject as 
TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 296fdba75..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -29,7 +29,6 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, - type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 89d00dcf6..f67a1edde 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -63,7 +63,6 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, - type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 608ef6481..25356df3c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -564,7 +564,7 @@ export interface Run { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy: ThreadsAPI.TruncationObject | null; + truncation_strategy: Run.TruncationStrategy | null; /** * Usage statistics related to the run. This value will be `null` if the run is not @@ -639,6 +639,26 @@ export namespace Run { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. 
When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -842,7 +862,7 @@ export interface RunCreateParamsBase { * Body param: Controls for how a thread will be truncated prior to the run. Use * this to control the intial context window of the run. */ - truncation_strategy?: ThreadsAPI.TruncationObject | null; + truncation_strategy?: RunCreateParams.TruncationStrategy | null; } export namespace RunCreateParams { @@ -901,6 +921,26 @@ export namespace RunCreateParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. 
+ */ + last_messages?: number | null; + } + export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 8b0332fb8..c0c6bc8e4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -346,26 +346,6 @@ export interface ThreadDeleted { object: 'thread.deleted'; } -/** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ -export interface TruncationObject { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; -} - export interface ThreadCreateParams { /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -754,7 +734,7 @@ export interface ThreadCreateAndRunParamsBase { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy?: TruncationObject | null; + truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; } export namespace ThreadCreateAndRunParams { @@ -985,6 +965,26 @@ export namespace ThreadCreateAndRunParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. 
The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; } @@ -1684,7 +1684,6 @@ export declare namespace Threads { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, - type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 5370c51b9..08c898ace 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -5,14 +5,12 @@ import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; import * as GraderModelsAPI from '../graders/grader-models'; +import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, - CreateEvalResponsesRunDataSource, EvalAPIError, - EvalJSONLFileContentSource, - EvalJSONLFileIDSource, RunCancelResponse, RunCreateParams, RunCreateResponse, @@ -106,36 +104,6 @@ export interface EvalCustomDataSourceConfig { type: 'custom'; } -/** - * A LogsDataSourceConfig which specifies the metadata property of your logs query. - * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The - * schema returned by this data source config is used to defined what variables are - * available in your evals. `item` and `sample` are both defined when using this - * data source config. - */ -export interface EvalLogsDataSourceConfig { - /** - * The json schema for the run data source items. Learn how to build JSON schemas - * [here](https://json-schema.org/). - */ - schema: Record; - - /** - * The type of data source. Always `logs`. - */ - type: 'logs'; - - /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. - */ - metadata?: Shared.Metadata | null; -} - /** * @deprecated Deprecated in favor of LogsDataSourceConfig. */ @@ -186,7 +154,7 @@ export interface EvalCreateResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalCreateResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -222,6 +190,36 @@ export interface EvalCreateResponse { } export namespace EvalCreateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -277,7 +275,7 @@ export interface EvalRetrieveResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalRetrieveResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -313,6 +311,36 @@ export interface EvalRetrieveResponse { } export namespace EvalRetrieveResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. 
*/ @@ -368,7 +396,7 @@ export interface EvalUpdateResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalUpdateResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -404,6 +432,36 @@ export interface EvalUpdateResponse { } export namespace EvalUpdateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -459,7 +517,7 @@ export interface EvalListResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalListResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -495,6 +553,36 @@ export interface EvalListResponse { } export namespace EvalListResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -634,7 +722,7 @@ export namespace EvalCreateParams { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - input: Array; + input: Array; /** * The labels to classify to each item in the evaluation. @@ -674,6 +762,48 @@ export namespace EvalCreateParams { */ role: string; } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. 
+ */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } } /** @@ -745,7 +875,6 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -762,10 +891,7 @@ export declare namespace Evals { Runs as Runs, type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, - type EvalJSONLFileContentSource as EvalJSONLFileContentSource, - type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index 084fc9ad6..b2627fbf3 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,7 +4,6 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -20,10 +19,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource, type EvalAPIError, - type 
EvalJSONLFileContentSource, - type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index 8e13e67df..d0e18bff4 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -12,10 +12,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource, type EvalAPIError, - type EvalJSONLFileContentSource, - type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index dec4dcb51..31883e6b5 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -88,8 +88,8 @@ export interface CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ source: - | EvalJSONLFileContentSource - | EvalJSONLFileIDSource + | CreateEvalCompletionsRunDataSource.FileContent + | CreateEvalCompletionsRunDataSource.FileID | CreateEvalCompletionsRunDataSource.StoredCompletions; /** @@ -110,6 +110,38 @@ export interface CreateEvalCompletionsRunDataSource { } export namespace CreateEvalCompletionsRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. 
+ */ + type: 'file_id'; + } + /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -155,7 +187,7 @@ export namespace CreateEvalCompletionsRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -163,6 +195,50 @@ export namespace CreateEvalCompletionsRunDataSource { type: 'template'; } + export namespace Template { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Message { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Message.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Message { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + export interface ItemReference { /** * A reference to a variable in the "item" namespace. 
Ie, "item.name" @@ -203,7 +279,7 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { - source: EvalJSONLFileContentSource | EvalJSONLFileIDSource; + source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; /** * The type of data source. Always `jsonl`. @@ -211,162 +287,37 @@ export interface CreateEvalJSONLRunDataSource { type: 'jsonl'; } -/** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ -export interface CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: EvalJSONLFileContentSource | EvalJSONLFileIDSource | CreateEvalResponsesRunDataSource.Responses; - - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - input_messages?: CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; -} - -export namespace CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional string to search the 'instructions' field. 
This is a query parameter - * used to select responses. - */ - instructions_search?: string | null; - +export namespace CreateEvalJSONLRunDataSource { + export interface FileContent { /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. + * The content of the jsonl file. */ - metadata?: unknown | null; + content: Array; /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. + * The type of jsonl source. Always `file_content`. */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * List of tool names. This is a query parameter used to select responses. - */ - tools?: Array | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; + type: 'file_content'; } - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; + export namespace FileContent { + export interface Content { + item: Record; - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; + sample?: Record; } } - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. 
Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - + export interface FileID { /** - * A higher temperature increases randomness in the outputs. + * The identifier of the file. */ - temperature?: number; + id: string; /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + * The type of jsonl source. Always `file_id`. */ - top_p?: number; + type: 'file_id'; } } @@ -385,38 +336,6 @@ export interface EvalAPIError { message: string; } -export interface EvalJSONLFileContentSource { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; -} - -export namespace EvalJSONLFileContentSource { - export interface Content { - item: Record; - - sample?: Record; - } -} - -export interface EvalJSONLFileIDSource { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; -} - /** * A schema representing an evaluation run. */ @@ -437,7 +356,7 @@ export interface RunCreateResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCreateResponse.Responses; /** * An object representing an error response from the Eval API. @@ -501,6 +420,239 @@ export interface RunCreateResponse { } export namespace RunCreateResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. 
+ */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. 
+ */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -596,7 +748,7 @@ export interface RunRetrieveResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunRetrieveResponse.Responses; /** * An object representing an error response from the Eval API. @@ -660,6 +812,239 @@ export interface RunRetrieveResponse { } export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. 
+ */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. 
+ */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -752,10 +1137,7 @@ export interface RunListResponse { /** * Information about the run's data source. */ - data_source: - | CreateEvalJSONLRunDataSource - | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource | RunListResponse.Responses; /** * An object representing an error response from the Eval API. @@ -787,38 +1169,271 @@ export interface RunListResponse { */ name: string; - /** - * The type of the object. Always "eval.run". - */ - object: 'eval.run'; + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. 
+ */ + status: string; +} + +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. 
+ */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; - /** - * Usage statistics for each model during the evaluation run. - */ - per_model_usage: Array; + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } - /** - * Results per testing criteria applied during the evaluation run. - */ - per_testing_criteria_results: Array; + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; - /** - * The URL to the rendered evaluation run report on the UI dashboard. - */ - report_url: string; + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunListResponse.ResultCounts; + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; - /** - * The status of the evaluation run. - */ - status: string; -} + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } -export namespace RunListResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. 
@@ -922,7 +1537,7 @@ export interface RunCancelResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCancelResponse.Responses; /** * An object representing an error response from the Eval API. @@ -986,6 +1601,239 @@ export interface RunCancelResponse { } export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. 
+ */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. 
@@ -1068,7 +1916,7 @@ export interface RunCreateParams { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCreateParams.CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1086,6 +1934,246 @@ export interface RunCreateParams { name?: string; } +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. 
+ */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. 
+ */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. 
Use `asc` for ascending order or `desc` for @@ -1108,10 +2196,7 @@ export declare namespace Runs { export { type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, - type EvalJSONLFileContentSource as EvalJSONLFileContentSource, - type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts index 8a1a0eddd..d2c335300 100644 --- a/src/resources/graders/grader-models.ts +++ b/src/resources/graders/grader-models.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as Shared from '../shared'; +import * as ResponsesAPI from '../responses/responses'; export class GraderModels extends APIResource {} @@ -10,7 +10,7 @@ export class GraderModels extends APIResource {} * the evaluation. */ export interface LabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. @@ -38,6 +38,50 @@ export interface LabelModelGrader { type: 'label_model'; } +export namespace LabelModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + /** * A MultiGrader object combines the output of multiple graders to produce a single * score. @@ -59,7 +103,7 @@ export interface MultiGrader { name: string; /** - * The type of grader. + * The object type, which is always `multi`. */ type: 'multi'; } @@ -96,7 +140,7 @@ export interface ScoreModelGrader { /** * The input text. This may include template strings. */ - input: Array; + input: Array; /** * The model to use for the evaluation. @@ -124,6 +168,50 @@ export interface ScoreModelGrader { sampling_params?: unknown; } +export namespace ScoreModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + /** * A StringCheckGrader object that performs a string comparison between input and * reference using a specified operation. diff --git a/src/resources/index.ts b/src/resources/index.ts index 0f21e596c..9d827615c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,7 +33,6 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -91,7 +90,6 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, - type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index d0c2aaa49..adea184fd 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,7 +1,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as ResponsesAPI from './responses/responses'; - export type AllModels = | (string & {}) | ChatModel @@ -43,6 +41,7 @@ export type ChatModel = | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview-2025-03-11' | 'chatgpt-4o-latest' + | 'codex-mini-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' @@ -120,48 +119,6 @@ export interface ErrorObject { type: string; } -/** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. 
Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ -export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; -} - -export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } -} - export interface FunctionDefinition { /** * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index c3c042387..9cbcbc0b2 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -30,7 +30,6 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, - type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 66438be02..7d61e7fd6 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -249,7 +249,7 @@ export interface VectorStore { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreExpirationAfter; + expires_after?: VectorStore.ExpiresAfter; /** * The Unix timestamp (in seconds) for when the vector store will expire. 
@@ -284,6 +284,22 @@ export namespace VectorStore { */ total: number; } + + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } } export interface VectorStoreDeleted { @@ -294,22 +310,6 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } -/** - * The expiration policy for a vector store. - */ -export interface VectorStoreExpirationAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; -} - export interface VectorStoreSearchResponse { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -365,7 +365,7 @@ export interface VectorStoreCreateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreExpirationAfter; + expires_after?: VectorStoreCreateParams.ExpiresAfter; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -390,11 +390,29 @@ export interface VectorStoreCreateParams { name?: string; } +export namespace VectorStoreCreateParams { + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + export interface VectorStoreUpdateParams { /** * The expiration policy for a vector store. 
*/ - expires_after?: VectorStoreExpirationAfter | null; + expires_after?: VectorStoreUpdateParams.ExpiresAfter | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -412,6 +430,24 @@ export interface VectorStoreUpdateParams { name?: string | null; } +export namespace VectorStoreUpdateParams { + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -485,7 +521,6 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, - type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, From 416c89bc773e6a79a238123467b4431da39c072d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 17:11:44 +0000 Subject: [PATCH 243/246] release: 4.99.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 10 ++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a279d9124..a65cc01f9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.98.0" + ".": "4.99.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 
2f1a39177..2a552b8ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## 4.99.0 (2025-05-16) + +Full Changelog: [v4.98.0...v4.99.0](https://github.com/openai/openai-node/compare/v4.98.0...v4.99.0) + +### Features + +* **api:** manual updates ([75eb804](https://github.com/openai/openai-node/commit/75eb804edd6ad653eaa22d47f8c6d09ee845ebf4)) +* **api:** responses x eval api ([5029f1a](https://github.com/openai/openai-node/commit/5029f1a05eb1e8601ada06e0a5ba49f4c2b83c02)) +* **api:** Updating Assistants and Evals API schemas ([27fd517](https://github.com/openai/openai-node/commit/27fd5173b20f75debe96024ae8f1ce58a8254d26)) + ## 4.98.0 (2025-05-08) Full Changelog: [v4.97.0...v4.98.0](https://github.com/openai/openai-node/compare/v4.97.0...v4.98.0) diff --git a/jsr.json b/jsr.json index 25bbc9ac2..2f29927c6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.98.0", + "version": "4.99.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d34efceb0..0d756ef85 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.98.0", + "version": "4.99.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f64cc03ff..c7ee5a162 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.98.0'; // x-release-please-version +export const VERSION = '4.99.0'; // x-release-please-version From 5123fe08a56f3d0040b1cc67129382f3eacc3cca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:35:22 +0000 Subject: [PATCH 244/246] chore(internal): version bump From 3f6f248191b45015924be76fd5154d149c4ed8a0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:41:32 +0000 Subject: [PATCH 245/246] feat(api): further updates for evals API --- .stats.yml | 4 +- src/resources/beta/realtime/realtime.ts | 2 +- .../beta/realtime/transcription-sessions.ts | 2 +- src/resources/evals/evals.ts | 31 ++--- src/resources/evals/runs/runs.ts | 109 ++++++++++-------- 5 files changed, 82 insertions(+), 66 deletions(-) diff --git a/.stats.yml b/.stats.yml index a3c5d081d..afa33d93b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml -openapi_spec_hash: da3e669f65130043b1170048c0727890 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml +openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5012b1edd..26fba883e 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2571,7 +2571,7 @@ export interface TranscriptionSessionUpdatedEvent { * A new Realtime transcription session configuration. * * When a session is created on the server via REST API, the session object also - * contains an ephemeral key. Default TTL for keys is one minute. This property is + * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is * not present when a session is updated via the WebSocket API. 
*/ session: TranscriptionSessionsAPI.TranscriptionSession; diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index 61e58a8e8..83e8c47ad 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -35,7 +35,7 @@ export class TranscriptionSessions extends APIResource { * A new Realtime transcription session configuration. * * When a session is created on the server via REST API, the session object also - * contains an ephemeral key. Default TTL for keys is one minute. This property is + * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is * not present when a session is updated via the WebSocket API. */ export interface TranscriptionSession { diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 08c898ace..05a656619 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -28,7 +28,8 @@ export class Evals extends APIResource { /** * Create the structure of an evaluation that can be used to test a model's - * performance. An evaluation is a set of testing criteria and a datasource. After + * performance. An evaluation is a set of testing criteria and the config for a + * data source, which dictates the schema of the data used in the evaluation. After * creating an evaluation, you can run it on different models and model parameters. * We support several types of graders and datasources. For more information, see * the [Evals guide](https://platform.openai.com/docs/guides/evals). @@ -115,9 +116,9 @@ export interface EvalStoredCompletionsDataSourceConfig { schema: Record; /** - * The type of data source. Always `stored-completions`. + * The type of data source. Always `stored_completions`. */ - type: 'stored-completions'; + type: 'stored_completions'; /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -136,7 +137,7 @@ export interface EvalStoredCompletionsDataSourceConfig { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalCreateResponse { /** @@ -257,7 +258,7 @@ export namespace EvalCreateResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalRetrieveResponse { /** @@ -378,7 +379,7 @@ export namespace EvalRetrieveResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalUpdateResponse { /** @@ -499,7 +500,7 @@ export namespace EvalUpdateResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalListResponse { /** @@ -624,12 +625,16 @@ export interface EvalDeleteResponse { export interface EvalCreateParams { /** - * The configuration for the data source used for the evaluation runs. + * The configuration for the data source used for the evaluation runs. Dictates the + * schema of the data used in the evaluation. */ data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions; /** - * A list of graders for all eval runs in this group. + * A list of graders for all eval runs in this group. Graders can reference + * variables in the data source using double curly braces notation, like + * `{{item.variable_name}}`. 
To reference the model's output, use the `sample` + * namespace (ie, `{{sample.output_text}}`). */ testing_criteria: Array< | EvalCreateParams.LabelModel @@ -699,13 +704,13 @@ export namespace EvalCreateParams { } /** - * Deprecated in favor of LogsDataSourceConfig. + * @deprecated Deprecated in favor of LogsDataSourceConfig. */ export interface StoredCompletions { /** - * The type of data source. Always `stored-completions`. + * The type of data source. Always `stored_completions`. */ - type: 'stored-completions'; + type: 'stored_completions'; /** * Metadata filters for the stored completions data source. @@ -720,7 +725,7 @@ export namespace EvalCreateParams { export interface LabelModel { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ input: Array; diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 31883e6b5..e761e2160 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -19,7 +19,9 @@ export class Runs extends APIResource { outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client); /** - * Create a new evaluation run. This is the endpoint that will kick off grading. + * Kicks off a new run for a given evaluation, specifying the data source, and what + * model configuration to use to test. The datasource will be validated against the + * schema specified in the config of the evaluation. */ create( evalId: string, @@ -85,7 +87,7 @@ export class RunListResponsesPage extends CursorPage {} */ export interface CreateEvalCompletionsRunDataSource { /** - * A StoredCompletionsRunDataSource configuration describing a set of filters + * Determines what populates the `item` namespace in this run's data source. 
*/ source: | CreateEvalCompletionsRunDataSource.FileContent @@ -97,6 +99,12 @@ export interface CreateEvalCompletionsRunDataSource { */ type: 'completions'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: | CreateEvalCompletionsRunDataSource.Template | CreateEvalCompletionsRunDataSource.ItemReference; @@ -185,7 +193,7 @@ export namespace CreateEvalCompletionsRunDataSource { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -241,7 +249,7 @@ export namespace CreateEvalCompletionsRunDataSource { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" */ item_reference: string; @@ -279,6 +287,9 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { + /** + * Determines what populates the `item` namespace in the data source. + */ source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; /** @@ -425,7 +436,7 @@ export namespace RunCreateResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -434,6 +445,12 @@ export namespace RunCreateResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. 
Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -498,12 +515,6 @@ export namespace RunCreateResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -552,7 +563,7 @@ export namespace RunCreateResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -620,7 +631,7 @@ export namespace RunCreateResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -817,7 +828,7 @@ export namespace RunRetrieveResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -826,6 +837,12 @@ export namespace RunRetrieveResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. 
+ */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -890,12 +907,6 @@ export namespace RunRetrieveResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -944,7 +955,7 @@ export namespace RunRetrieveResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1012,7 +1023,7 @@ export namespace RunRetrieveResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1206,7 +1217,7 @@ export namespace RunListResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -1215,6 +1226,12 @@ export namespace RunListResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -1279,12 +1296,6 @@ export namespace RunListResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. 
- */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -1333,7 +1344,7 @@ export namespace RunListResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1401,7 +1412,7 @@ export namespace RunListResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1606,7 +1617,7 @@ export namespace RunCancelResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -1615,6 +1626,12 @@ export namespace RunCancelResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -1679,12 +1696,6 @@ export namespace RunCancelResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -1733,7 +1744,7 @@ export namespace RunCancelResponse { export interface Template { /** * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1801,7 +1812,7 @@ export namespace RunCancelResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1940,7 +1951,7 @@ export namespace RunCreateParams { */ export interface CreateEvalResponsesRunDataSource { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: | CreateEvalResponsesRunDataSource.FileContent @@ -1952,6 +1963,12 @@ export namespace RunCreateParams { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: | CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; @@ -2018,12 +2035,6 @@ export namespace RunCreateParams { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -2072,7 +2083,7 @@ export namespace RunCreateParams { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. 
*/ template: Array; @@ -2140,7 +2151,7 @@ export namespace RunCreateParams { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; From 29e608f24af8880fcdc0d45cc64321e4856e47ba Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:42:24 +0000 Subject: [PATCH 246/246] release: 4.100.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a65cc01f9..989bed91e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.99.0" + ".": "4.100.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a552b8ed..adda41e52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.100.0 (2025-05-16) + +Full Changelog: [v4.99.0...v4.100.0](https://github.com/openai/openai-node/compare/v4.99.0...v4.100.0) + +### Features + +* **api:** further updates for evals API ([3f6f248](https://github.com/openai/openai-node/commit/3f6f248191b45015924be76fd5154d149c4ed8a0)) + + +### Chores + +* **internal:** version bump ([5123fe0](https://github.com/openai/openai-node/commit/5123fe08a56f3d0040b1cc67129382f3eacc3cca)) + ## 4.99.0 (2025-05-16) Full Changelog: [v4.98.0...v4.99.0](https://github.com/openai/openai-node/compare/v4.98.0...v4.99.0) diff --git a/jsr.json b/jsr.json index 2f29927c6..3c2d41b0f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.99.0", + "version": "4.100.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 0d756ef85..23205e569 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.99.0", + "version": "4.100.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c7ee5a162..62b43ffce 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.99.0'; // x-release-please-version +export const VERSION = '4.100.0'; // x-release-please-version